Merge branch 'release/0.0.2.incubating' into hadoop-eclipse-merge
diff --git a/NOTICE b/NOTICE
index 507b766..bea83bd 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
Apache Hadoop Development Tools
-Copyright 2013 The Apache Software Foundation
+Copyright 2014 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
diff --git a/README.txt b/README.txt
index 38e8bf9..8366cc0 100644
--- a/README.txt
+++ b/README.txt
@@ -1,3 +1,20 @@
+Welcome to Apache Hadoop Development Tools!
+===========================================
+Hadoop Development Tools (HDT) is a set of Eclipse IDE plugins for developing against the Hadoop platform.
+For more information please see the website:
+
+ http://hdt.incubator.apache.org/
+
This is the repository for the Apache Hadoop Development Tools project, currently a podling at the Apache incubator.
-http://hdt.incubator.apache.org/
\ No newline at end of file
+
+Building the Source Code
+------------------------
+
+We recommend Maven 3 and JDK 6 for building HDT. To build the project, run the following Maven command:
+
+ mvn clean install
+
+The default PermGen size may not be sufficient for the plugin to work. Please increase it to 128M by setting JAVA_OPTS/MAVEN_OPTS, as shown below.
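+
+For example, on Linux or OS X (a typical setting; adjust to your environment):
+
+    export MAVEN_OPTS="-XX:MaxPermSize=128m"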
+
+
diff --git a/org.apache.hdt.core/.classpath b/org.apache.hdt.core/.classpath
index 4a37a3a..4a91e22 100644
--- a/org.apache.hdt.core/.classpath
+++ b/org.apache.hdt.core/.classpath
@@ -2,7 +2,7 @@
<classpath>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
- <classpathentry kind="src" path="src/"/>
+ <classpathentry kind="src" path="src"/>
<classpathentry exported="true" kind="lib" path="jars/log4j-1.2.15.jar"/>
<classpathentry kind="output" path="target/classes"/>
</classpath>
diff --git a/org.apache.hdt.core/META-INF/MANIFEST.MF b/org.apache.hdt.core/META-INF/MANIFEST.MF
index 620e9ba..bbe6c84 100644
--- a/org.apache.hdt.core/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.core/META-INF/MANIFEST.MF
@@ -2,13 +2,16 @@
Bundle-ManifestVersion: 2
Bundle-Name: Apache Hadoop Eclipse Plugin
Bundle-SymbolicName: org.apache.hdt.core;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.incubating
Bundle-Activator: org.apache.hdt.core.Activator
Require-Bundle: org.eclipse.core.runtime,
org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
org.eclipse.core.resources;bundle-version="3.6.0",
org.eclipse.emf.ecore;bundle-version="2.6.1";visibility:=reexport,
- org.eclipse.team.core;bundle-version="3.5.100"
+ org.eclipse.jdt.core,
+ org.eclipse.team.core;bundle-version="3.5.100",
+ org.eclipse.swt,
+ org.eclipse.jface
Bundle-RequiredExecutionEnvironment: JavaSE-1.6
Bundle-Vendor: Apache Hadoop
Bundle-ClassPath: .,
@@ -21,6 +24,8 @@
org.apache.hdt.core.internal.model.impl,
org.apache.hdt.core.internal.model.util,
org.apache.hdt.core.internal.zookeeper,
+ org.apache.hdt.core.launch,
+ org.apache.hdt.core.natures,
org.apache.hdt.core.zookeeper,
org.apache.log4j,
org.apache.log4j.chainsaw,
@@ -42,3 +47,4 @@
org.apache.log4j.varia,
org.apache.log4j.xml
Bundle-ActivationPolicy: lazy
+
diff --git a/org.apache.hdt.core/models/Hadoop.ecore b/org.apache.hdt.core/models/Hadoop.ecore
index 2b3e8ea..70207c0 100644
--- a/org.apache.hdt.core/models/Hadoop.ecore
+++ b/org.apache.hdt.core/models/Hadoop.ecore
@@ -30,6 +30,8 @@
<eStructuralFeatures xsi:type="ecore:EAttribute" name="userId" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
<eStructuralFeatures xsi:type="ecore:EAttribute" name="groupIds" upperBound="-1"
eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"/>
+ <eStructuralFeatures xsi:type="ecore:EAttribute" name="version" eType="ecore:EDataType http://www.eclipse.org/emf/2002/Ecore#//EString"
+ defaultValueLiteral="1.0.0.0"/>
</eClassifiers>
<eClassifiers xsi:type="ecore:EClass" name="Servers">
<eStructuralFeatures xsi:type="ecore:EReference" name="hdfsServers" upperBound="-1"
diff --git a/org.apache.hdt.core/plugin.xml b/org.apache.hdt.core/plugin.xml
index 82dcbec..86ca57b 100644
--- a/org.apache.hdt.core/plugin.xml
+++ b/org.apache.hdt.core/plugin.xml
@@ -19,6 +19,9 @@
<plugin>
<extension-point id="org.apache.hdt.core.hdfsClient" name="Apache Hadoop HDFS Client" schema="schema/org.apache.hadoop.eclipse.hdfsclient.exsd"/>
<extension-point id="org.apache.hdt.core.zookeeperClient" name="Apache Hadoop ZooKeeper Client" schema="schema/org.apache.hadoop.eclipse.zookeeperClient.exsd"/>
+ <extension-point id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster" schema="schema/org.apache.hadoop.eclipse.hadoopCluster.exsd"/>
+ <extension-point id="org.apache.hdt.core.hadoopHomeReader" name="Apache Hadoop Home Location Reader" schema="schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd"/>
+
<extension
id="org.apache.hadoop.hdfs.filesystem"
name="Apache Hadoop HDFS"
@@ -39,5 +42,15 @@
id="org.apache.hadoop.hdfs">
</repository>
</extension>
+ <extension
+ id="org.apache.hdt.mrnature"
+ name="MapReduce Nature"
+ point="org.eclipse.core.resources.natures">
+ <runtime>
+ <run
+ class="org.apache.hdt.core.natures.MapReduceNature">
+ </run>
+ </runtime>
+ </extension>
</plugin>
diff --git a/org.apache.hdt.core/pom.xml b/org.apache.hdt.core/pom.xml
index a5bbc5e..095e1dc 100644
--- a/org.apache.hdt.core/pom.xml
+++ b/org.apache.hdt.core/pom.xml
@@ -23,7 +23,7 @@
<relativePath>../pom.xml</relativePath>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.core</artifactId>
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
new file mode 100644
index 0000000..72d3899
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopCluster.exsd
@@ -0,0 +1,126 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+ <appinfo>
+ <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.hadoopCluster" name="Apache Hadoop Cluster"/>
+ </appinfo>
+ <documentation>
+ [Enter description of this extension point.]
+ </documentation>
+ </annotation>
+
+ <element name="extension">
+ <annotation>
+ <appinfo>
+ <meta.element />
+ </appinfo>
+ </annotation>
+ <complexType>
+ <choice>
+ <sequence>
+ <element ref="hadoopCluster" minOccurs="0" maxOccurs="unbounded"/>
+ </sequence>
+ </choice>
+ <attribute name="point" type="string" use="required">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ </annotation>
+ </attribute>
+ <attribute name="id" type="string">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ </annotation>
+ </attribute>
+ <attribute name="name" type="string">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ <appinfo>
+ <meta.attribute translatable="true"/>
+ </appinfo>
+ </annotation>
+ </attribute>
+ </complexType>
+ </element>
+
+ <element name="hadoopCluster">
+ <complexType>
+ <attribute name="class" type="string" use="required">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ <appinfo>
+ <meta.attribute kind="java" basedOn="org.apache.hdt.core.launch.AbstractHadoopCluster:"/>
+ </appinfo>
+ </annotation>
+ </attribute>
+ <attribute name="protocolVersion" type="string" use="required">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ </annotation>
+ </attribute>
+ </complexType>
+ </element>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="since"/>
+ </appinfo>
+ <documentation>
+ [Enter the first release in which this extension point appears.]
+ </documentation>
+ </annotation>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="examples"/>
+ </appinfo>
+ <documentation>
+ [Enter extension point usage example here.]
+ </documentation>
+ </annotation>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="apiinfo"/>
+ </appinfo>
+ <documentation>
+ [Enter API information here.]
+ </documentation>
+ </annotation>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="implementation"/>
+ </appinfo>
+ <documentation>
+ [Enter information about supplied implementation of this extension point.]
+ </documentation>
+ </annotation>
+
+
+</schema>
diff --git a/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd
new file mode 100644
index 0000000..bfd8941
--- /dev/null
+++ b/org.apache.hdt.core/schema/org.apache.hadoop.eclipse.hadoopHomeReader.exsd
@@ -0,0 +1,126 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema targetNamespace="org.apache.hdt.core" xmlns="http://www.w3.org/2001/XMLSchema">
+<annotation>
+ <appinfo>
+ <meta.schema plugin="org.apache.hdt.core" id="org.apache.hdt.core.hadoopHomeReader" name="Apache Hadoop Home Location Reader"/>
+ </appinfo>
+ <documentation>
+ [Enter description of this extension point.]
+ </documentation>
+ </annotation>
+
+ <element name="extension">
+ <annotation>
+ <appinfo>
+ <meta.element />
+ </appinfo>
+ </annotation>
+ <complexType>
+ <choice>
+ <sequence>
+ <element ref="hadoopHomeReader" minOccurs="0" maxOccurs="unbounded"/>
+ </sequence>
+ </choice>
+ <attribute name="point" type="string" use="required">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ </annotation>
+ </attribute>
+ <attribute name="id" type="string">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ </annotation>
+ </attribute>
+ <attribute name="name" type="string">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ <appinfo>
+ <meta.attribute translatable="true"/>
+ </appinfo>
+ </annotation>
+ </attribute>
+ </complexType>
+ </element>
+
+ <element name="hadoopHomeReader">
+ <complexType>
+ <attribute name="class" type="string" use="required">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ <appinfo>
+ <meta.attribute kind="java" basedOn="org.apache.hdt.core.AbstractHadoopHomeReader:"/>
+ </appinfo>
+ </annotation>
+ </attribute>
+ <attribute name="protocolVersion" type="string" use="required">
+ <annotation>
+ <documentation>
+
+ </documentation>
+ </annotation>
+ </attribute>
+ </complexType>
+ </element>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="since"/>
+ </appinfo>
+ <documentation>
+ [Enter the first release in which this extension point appears.]
+ </documentation>
+ </annotation>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="examples"/>
+ </appinfo>
+ <documentation>
+ [Enter extension point usage example here.]
+ </documentation>
+ </annotation>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="apiinfo"/>
+ </appinfo>
+ <documentation>
+ [Enter API information here.]
+ </documentation>
+ </annotation>
+
+ <annotation>
+ <appinfo>
+ <meta.section type="implementation"/>
+ </appinfo>
+ <documentation>
+ [Enter information about supplied implementation of this extension point.]
+ </documentation>
+ </annotation>
+
+
+</schema>
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java b/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java
new file mode 100644
index 0000000..aa61296
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/AbstractHadoopHomeReader.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core;
+
+import java.io.File;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+
+public abstract class AbstractHadoopHomeReader {
+ private static final Logger logger = Logger.getLogger(AbstractHadoopHomeReader.class);
+ public abstract boolean validateHadoopHome(File location);
+ public abstract List<File> getHadoopJars(File location);
+
+ public static AbstractHadoopHomeReader createReader(String hadoopVersion) throws CoreException {
+ logger.debug("Creating hadoop home reader");
+ IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopHomeReader");
+ for (IConfigurationElement configElement : elementsFor) {
+ String version = configElement.getAttribute("protocolVersion");
+ if (version.equalsIgnoreCase(hadoopVersion)) {
+ return (AbstractHadoopHomeReader)configElement.createExecutableExtension("class");
+ }
+ }
+		throw new CoreException(new Status(Status.ERROR, Activator.BUNDLE_ID, "No reader found for hadoop version " + hadoopVersion));
+ }
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java b/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java
new file mode 100644
index 0000000..020b7d9
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/HadoopVersion.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.core;
+
+
+public enum HadoopVersion {
+ Version1("1.1"), Version2("2.2");
+
+ private String displayName;
+
+ private HadoopVersion(String displayName) {
+ this.displayName = displayName;
+ }
+
+ public String getDisplayName() {
+ return displayName;
+ }
+}
+
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
index 937b171..125c9a2 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/HadoopManager.java
@@ -53,7 +53,8 @@
loadServers();
if (servers == null) {
Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
- File serversFile = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
+ File stateLocation = Platform.getStateLocation(bundle).toFile();
+ File serversFile = new File(stateLocation,MODEL_FILE_NAME);
Resource resource = new ResourceSetImpl().createResource(URI.createFileURI(serversFile.getPath()));
servers = HadoopFactory.eINSTANCE.createServers();
resource.getContents().add(servers);
@@ -64,7 +65,8 @@
private void loadServers() {
Bundle bundle = Platform.getBundle(Activator.BUNDLE_ID);
- File serversFile = bundle.getBundleContext().getDataFile(MODEL_FILE_NAME);
+ File stateLocation = Platform.getStateLocation(bundle).toFile();
+ File serversFile = new File(stateLocation,MODEL_FILE_NAME);
if (serversFile.exists()) {
Resource resource = new ResourceSetImpl().getResource(URI.createFileURI(serversFile.getPath()), true);
servers = (Servers) resource.getContents().get(0);
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
index ffd68ec..2809e55 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSFileStore.java
@@ -115,7 +115,7 @@
* @throws CoreException
*/
private HDFSClient getClient() throws CoreException {
- return HDFSManager.INSTANCE.getClient(getServer().getUri());
+ return HDFSManager.INSTANCE.getClient(getServer().getUri(),getServer().getVersion());
}
/**
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
index 93f0696..8d27d23 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSManager.java
@@ -18,6 +18,7 @@
package org.apache.hdt.core.internal.hdfs;
+import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.LinkedHashMap;
@@ -37,13 +38,19 @@
import org.eclipse.core.resources.IWorkspace;
import org.eclipse.core.resources.IWorkspaceRoot;
import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.resources.WorkspaceJob;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.SubProgressMonitor;
+import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.emf.common.util.EList;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
import org.eclipse.team.core.RepositoryProvider;
/**
@@ -143,7 +150,7 @@
* @return
* @throws CoreException
*/
- public HDFSServer createServer(String name, java.net.URI hdfsURI, String userId, List<String> groupIds) throws CoreException {
+ public HDFSServer createServer(String name, java.net.URI hdfsURI, String userId, List<String> groupIds,String version) throws CoreException {
if (hdfsURI.getPath() == null || hdfsURI.getPath().length() < 1) {
try {
hdfsURI = new java.net.URI(hdfsURI.toString() + "/");
@@ -156,6 +163,7 @@
hdfsServer.setName(name);
hdfsServer.setUri(hdfsURI.toString());
hdfsServer.setLoaded(true);
+ hdfsServer.setVersion(version);
if (userId != null)
hdfsServer.setUserId(userId);
if (groupIds != null)
@@ -176,14 +184,40 @@
* @return
* @throws CoreException
*/
- private IProject createIProject(String name, java.net.URI hdfsURI) throws CoreException {
+ private IProject createIProject(String name, final java.net.URI hdfsURI) {
final IWorkspace workspace = ResourcesPlugin.getWorkspace();
- IProject project = workspace.getRoot().getProject(name);
- IProjectDescription pd = workspace.newProjectDescription(name);
- pd.setLocationURI(hdfsURI);
- project.create(pd, new NullProgressMonitor());
- project.open(new NullProgressMonitor());
- RepositoryProvider.map(project, HDFSTeamRepositoryProvider.ID);
+ final IProject project = workspace.getRoot().getProject(name);
+ final IProjectDescription pd = workspace.newProjectDescription(name);
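+		// Create and open the project in a background workspace job so the UI
+		// thread is not blocked while the HDFS location is contacted.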
+ WorkspaceJob operation = new WorkspaceJob("Adding HDFS Location") {
+
+ @Override
+ public IStatus runInWorkspace(IProgressMonitor monitor) throws CoreException {
+ monitor.beginTask("Creating Project", 100);
+ try {
+ pd.setLocationURI(hdfsURI);
+ project.create(pd, new SubProgressMonitor(monitor, 70));
+ project.open(IResource.BACKGROUND_REFRESH, new SubProgressMonitor(monitor, 30));
+ RepositoryProvider.map(project, HDFSTeamRepositoryProvider.ID);
+ return Status.OK_STATUS;
+ } catch (final CoreException e) {
+				logger.error("Error creating HDFS site", e);
+ Display.getDefault().syncExec(new Runnable(){
+ public void run(){
+ MessageDialog.openError(Display.getDefault().getActiveShell(),
+								"HDFS Error", "Unable to create HDFS site: " + e.getMessage());
+ }
+ });
+ deleteServer(getServer(hdfsURI.toString()));
+ return e.getStatus();
+ } finally {
+ monitor.done();
+ }
+ }
+ };
+ operation.setPriority(Job.LONG);
+ operation.setUser(true);
+ operation.setRule(project);
+ operation.schedule();
return project;
}
@@ -204,6 +238,8 @@
}
return uriToServerCacheMap.get(uri);
}
+
+
public String getProjectName(HDFSServer server) {
return serverToProjectMap.get(server);
@@ -245,7 +281,17 @@
String projectName = this.serverToProjectMap.remove(server);
this.projectToServerMap.remove(projectName);
this.uriToServerMap.remove(server.getUri());
+ this.uriToServerCacheMap.remove(server.getUri());
HadoopManager.INSTANCE.saveServers();
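+		// Purge cached server lookups for this URI and every parent path prefix
+		// so stale entries cannot resolve to the deleted server.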
+ String tmpUri = server.getUri();
+ while (tmpUri != null && uriToServerCacheMap.containsKey(tmpUri)) {
+ uriToServerCacheMap.remove(tmpUri);
+ int lastSlashIndex = tmpUri.lastIndexOf('/');
+ tmpUri = lastSlashIndex < 0 ? null : tmpUri.substring(0, lastSlashIndex);
+ }
+ if(hdfsClientsMap.containsKey(server.getUri().toString())){
+ hdfsClientsMap.remove(server.getUri().toString());
+ }
}
/**
@@ -255,7 +301,7 @@
* @return
* @throws CoreException
*/
- public HDFSClient getClient(String serverURI) throws CoreException {
+ public HDFSClient getClient(String serverURI,String hdfsVersion) throws CoreException {
if (logger.isDebugEnabled())
logger.debug("getClient(" + serverURI + "): Server=" + serverURI);
HDFSServer server = getServer(serverURI);
@@ -272,8 +318,11 @@
IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hdfsClient");
for (IConfigurationElement element : elementsFor) {
if (sUri.getScheme().equals(element.getAttribute("protocol"))) {
- HDFSClient client = (HDFSClient) element.createExecutableExtension("class");
- hdfsClientsMap.put(serverURI, new InterruptableHDFSClient(serverURI, client));
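+					// Only instantiate the client whose declared protocolVersion
+					// matches this server's Hadoop version.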
+ String version = element.getAttribute("protocolVersion");
+ if(hdfsVersion.equalsIgnoreCase(version)){
+ HDFSClient client = (HDFSClient) element.createExecutableExtension("class");
+ hdfsClientsMap.put(serverURI, new InterruptableHDFSClient(serverURI, client));
+ }
}
}
} catch (URISyntaxException e) {
@@ -282,4 +331,18 @@
return hdfsClientsMap.get(serverURI);
}
}
+
+ public static org.eclipse.core.runtime.IStatus addServer(String serverName, String location,
+ String userId, List<String> groupId,String version) {
+ try {
+ HDFSManager.INSTANCE.createServer(serverName, new URI(location), userId, groupId,version);
+ } catch (CoreException e) {
+ logger.warn(e.getMessage(), e);
+ return e.getStatus();
+ } catch (URISyntaxException e) {
+ logger.warn(e.getMessage(), e);
+ return new Status(Status.ERROR,"unknown",e.getMessage(),e);
+ }
+ return Status.OK_STATUS;
+ }
}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
index 0ca0df4..f4fb099 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/HDFSMoveDeleteHook.java
@@ -18,6 +18,7 @@
package org.apache.hdt.core.internal.hdfs;
+import org.apache.hdt.core.internal.model.HDFSServer;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.IProject;
@@ -77,6 +78,8 @@
throw new RuntimeException(
"Deletion of HDFS project root folder is not supported. To remove project uncheck the \'Delete project contents on disk\' checkbox");
}
+ HDFSServer server = HDFSManager.INSTANCE.getServer(project.getLocationURI().toString());
+ HDFSManager.INSTANCE.deleteServer(server);
}
return false;
}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
index 0301d5f..b6e9c46 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/hdfs/InterruptableHDFSClient.java
@@ -28,7 +28,6 @@
import org.apache.hdt.core.hdfs.HDFSClient;
import org.apache.hdt.core.hdfs.ResourceInformation;
import org.apache.hdt.core.internal.model.HDFSServer;
-import org.apache.hdt.core.internal.model.ServerStatus;
import org.apache.log4j.Logger;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.resources.ResourcesPlugin;
@@ -41,9 +40,6 @@
public class InterruptableHDFSClient extends HDFSClient {
private static final int DEFAULT_TIMEOUT = 5000;
private static final Logger logger = Logger.getLogger(InterruptableHDFSClient.class);
- // private static ExecutorService threadPool =
- // Executors.newFixedThreadPool(10);
-
private final HDFSClient client;
private final int timeoutMillis = DEFAULT_TIMEOUT;
private final String serverURI;
@@ -67,12 +63,17 @@
final InterruptedException[] inE = new InterruptedException[1];
Thread runnerThread = new Thread(new Runnable() {
public void run() {
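+				// Run the call under the HDFS client's own classloader so the
+				// version-specific Hadoop bundle can resolve its classes.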
+ Thread current = Thread.currentThread();
+ ClassLoader oldLoader = current.getContextClassLoader();
try {
+ current.setContextClassLoader(client.getClass().getClassLoader());
data.add(runnable.run());
} catch (IOException e) {
ioE[0] = e;
} catch (InterruptedException e) {
inE[0] = e;
+				} finally {
+ current.setContextClassLoader(oldLoader);
}
}
});
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
index be04f74..0419f2a 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HDFSServer.java
@@ -33,6 +33,7 @@
* <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getOperationURIs <em>Operation UR Is</em>}</li>
* <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getUserId <em>User Id</em>}</li>
* <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getGroupIds <em>Group Ids</em>}</li>
+ * <li>{@link org.apache.hdt.core.internal.model.HDFSServer#getVersion <em>Version</em>}</li>
* </ul>
* </p>
*
@@ -124,4 +125,31 @@
*/
EList<String> getGroupIds();
+ /**
+ * Returns the value of the '<em><b>Version</b></em>' attribute.
+ * The default value is <code>"1.0.0.0"</code>.
+ * <!-- begin-user-doc -->
+ * <p>
+ * If the meaning of the '<em>Version</em>' attribute isn't clear,
+ * there really should be more of a description here...
+ * </p>
+ * <!-- end-user-doc -->
+ * @return the value of the '<em>Version</em>' attribute.
+ * @see #setVersion(String)
+ * @see org.apache.hdt.core.internal.model.HadoopPackage#getHDFSServer_Version()
+ * @model default="1.0.0.0"
+ * @generated
+ */
+ String getVersion();
+
+ /**
+ * Sets the value of the '{@link org.apache.hdt.core.internal.model.HDFSServer#getVersion <em>Version</em>}' attribute.
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @param value the new value of the '<em>Version</em>' attribute.
+ * @see #getVersion()
+ * @generated
+ */
+ void setVersion(String value);
+
} // HDFSServer
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
index 8332b4e..f2fd035 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/HadoopPackage.java
@@ -228,13 +228,22 @@
int HDFS_SERVER__GROUP_IDS = SERVER_FEATURE_COUNT + 3;
/**
+ * The feature id for the '<em><b>Version</b></em>' attribute.
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @generated
+ * @ordered
+ */
+ int HDFS_SERVER__VERSION = SERVER_FEATURE_COUNT + 4;
+
+ /**
* The number of structural features of the '<em>HDFS Server</em>' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
* @ordered
*/
- int HDFS_SERVER_FEATURE_COUNT = SERVER_FEATURE_COUNT + 4;
+ int HDFS_SERVER_FEATURE_COUNT = SERVER_FEATURE_COUNT + 5;
/**
* The meta object id for the '{@link org.apache.hdt.core.internal.model.impl.ServersImpl <em>Servers</em>}' class.
@@ -737,6 +746,17 @@
EAttribute getHDFSServer_GroupIds();
/**
+ * Returns the meta object for the attribute '{@link org.apache.hdt.core.internal.model.HDFSServer#getVersion <em>Version</em>}'.
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @return the meta object for the attribute '<em>Version</em>'.
+ * @see org.apache.hdt.core.internal.model.HDFSServer#getVersion()
+ * @see #getHDFSServer()
+ * @generated
+ */
+ EAttribute getHDFSServer_Version();
+
+ /**
* Returns the meta object for class '{@link org.apache.hdt.core.internal.model.Servers <em>Servers</em>}'.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
@@ -1126,6 +1146,14 @@
EAttribute HDFS_SERVER__GROUP_IDS = eINSTANCE.getHDFSServer_GroupIds();
/**
+ * The meta object literal for the '<em><b>Version</b></em>' attribute feature.
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @generated
+ */
+ EAttribute HDFS_SERVER__VERSION = eINSTANCE.getHDFSServer_Version();
+
+ /**
* The meta object literal for the '{@link org.apache.hdt.core.internal.model.impl.ServersImpl <em>Servers</em>}' class.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
index ed25f07..5cc260c 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HDFSServerImpl.java
@@ -43,6 +43,7 @@
* <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getOperationURIs <em>Operation UR Is</em>}</li>
* <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getUserId <em>User Id</em>}</li>
* <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getGroupIds <em>Group Ids</em>}</li>
+ * <li>{@link org.apache.hdt.core.internal.model.impl.HDFSServerImpl#getVersion <em>Version</em>}</li>
* </ul>
* </p>
*
@@ -110,6 +111,26 @@
protected EList<String> groupIds;
/**
+ * The default value of the '{@link #getVersion() <em>Version</em>}' attribute.
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @see #getVersion()
+ * @generated
+ * @ordered
+ */
+ protected static final String VERSION_EDEFAULT = "1.0.0.0";
+
+ /**
+ * The cached value of the '{@link #getVersion() <em>Version</em>}' attribute.
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @see #getVersion()
+ * @generated
+ * @ordered
+ */
+ protected String version = VERSION_EDEFAULT;
+
+ /**
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
@@ -199,6 +220,27 @@
* <!-- end-user-doc -->
* @generated
*/
+ public String getVersion() {
+ return version;
+ }
+
+ /**
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @generated
+ */
+ public void setVersion(String newVersion) {
+ String oldVersion = version;
+ version = newVersion;
+ if (eNotificationRequired())
+ eNotify(new ENotificationImpl(this, Notification.SET, HadoopPackage.HDFS_SERVER__VERSION, oldVersion, version));
+ }
+
+ /**
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @generated
+ */
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
switch (featureID) {
@@ -210,6 +252,8 @@
return getUserId();
case HadoopPackage.HDFS_SERVER__GROUP_IDS:
return getGroupIds();
+ case HadoopPackage.HDFS_SERVER__VERSION:
+ return getVersion();
}
return super.eGet(featureID, resolve, coreType);
}
@@ -237,6 +281,9 @@
getGroupIds().clear();
getGroupIds().addAll((Collection<? extends String>)newValue);
return;
+ case HadoopPackage.HDFS_SERVER__VERSION:
+ setVersion((String)newValue);
+ return;
}
super.eSet(featureID, newValue);
}
@@ -261,6 +308,9 @@
case HadoopPackage.HDFS_SERVER__GROUP_IDS:
getGroupIds().clear();
return;
+ case HadoopPackage.HDFS_SERVER__VERSION:
+ setVersion(VERSION_EDEFAULT);
+ return;
}
super.eUnset(featureID);
}
@@ -281,6 +331,8 @@
return USER_ID_EDEFAULT == null ? userId != null : !USER_ID_EDEFAULT.equals(userId);
case HadoopPackage.HDFS_SERVER__GROUP_IDS:
return groupIds != null && !groupIds.isEmpty();
+ case HadoopPackage.HDFS_SERVER__VERSION:
+ return VERSION_EDEFAULT == null ? version != null : !VERSION_EDEFAULT.equals(version);
}
return super.eIsSet(featureID);
}
@@ -303,6 +355,8 @@
result.append(userId);
result.append(", groupIds: ");
result.append(groupIds);
+ result.append(", version: ");
+ result.append(version);
result.append(')');
return result.toString();
}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
index c3e5c2b..ac640c8 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopFactoryImpl.java
@@ -44,7 +44,7 @@
*/
public static HadoopFactory init() {
try {
- HadoopFactory theHadoopFactory = (HadoopFactory)EPackage.Registry.INSTANCE.getEFactory("http://hadoop/1.0");
+ HadoopFactory theHadoopFactory = (HadoopFactory)EPackage.Registry.INSTANCE.getEFactory(HadoopPackage.eNS_URI);
if (theHadoopFactory != null) {
return theHadoopFactory;
}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
index a698d56..c436729 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/impl/HadoopPackageImpl.java
@@ -196,6 +196,15 @@
* <!-- end-user-doc -->
* @generated
*/
+ public EAttribute getHDFSServer_Version() {
+ return (EAttribute)hdfsServerEClass.getEStructuralFeatures().get(4);
+ }
+
+ /**
+ * <!-- begin-user-doc -->
+ * <!-- end-user-doc -->
+ * @generated
+ */
public EClass getServers() {
return serversEClass;
}
@@ -494,6 +503,7 @@
createEAttribute(hdfsServerEClass, HDFS_SERVER__OPERATION_UR_IS);
createEAttribute(hdfsServerEClass, HDFS_SERVER__USER_ID);
createEAttribute(hdfsServerEClass, HDFS_SERVER__GROUP_IDS);
+ createEAttribute(hdfsServerEClass, HDFS_SERVER__VERSION);
serversEClass = createEClass(SERVERS);
createEReference(serversEClass, SERVERS__HDFS_SERVERS);
@@ -570,6 +580,7 @@
initEAttribute(getHDFSServer_OperationURIs(), ecorePackage.getEString(), "operationURIs", null, 0, -1, HDFSServer.class, IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
initEAttribute(getHDFSServer_UserId(), ecorePackage.getEString(), "userId", null, 0, 1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
initEAttribute(getHDFSServer_GroupIds(), ecorePackage.getEString(), "groupIds", null, 0, -1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
+ initEAttribute(getHDFSServer_Version(), ecorePackage.getEString(), "version", "1.0.0.0", 0, 1, HDFSServer.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
initEClass(serversEClass, Servers.class, "Servers", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
initEReference(getServers_HdfsServers(), this.getHDFSServer(), null, "hdfsServers", null, 0, -1, Servers.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
index 6f0b337..c56f87e 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/model/util/HadoopSwitch.java
@@ -24,6 +24,8 @@
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.EObject;
+import org.eclipse.emf.ecore.EPackage;
+import org.eclipse.emf.ecore.util.Switch;
/**
* <!-- begin-user-doc -->
@@ -38,7 +40,7 @@
* @see org.apache.hdt.core.internal.model.HadoopPackage
* @generated
*/
-public class HadoopSwitch<T> {
+public class HadoopSwitch<T> extends Switch<T> {
/**
* The cached model package
* <!-- begin-user-doc -->
@@ -60,14 +62,16 @@
}
/**
- * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
+ * Checks whether this is a switch for the given package.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
- * @return the first non-null result returned by a <code>caseXXX</code> call.
+ * @parameter ePackage the package in question.
+ * @return whether this is a switch for the given package.
* @generated
*/
- public T doSwitch(EObject theEObject) {
- return doSwitch(theEObject.eClass(), theEObject);
+ @Override
+ protected boolean isSwitchFor(EPackage ePackage) {
+ return ePackage == modelPackage;
}
/**
@@ -77,26 +81,7 @@
* @return the first non-null result returned by a <code>caseXXX</code> call.
* @generated
*/
- protected T doSwitch(EClass theEClass, EObject theEObject) {
- if (theEClass.eContainer() == modelPackage) {
- return doSwitch(theEClass.getClassifierID(), theEObject);
- }
- else {
- List<EClass> eSuperTypes = theEClass.getESuperTypes();
- return
- eSuperTypes.isEmpty() ?
- defaultCase(theEObject) :
- doSwitch(eSuperTypes.get(0), theEObject);
- }
- }
-
- /**
- * Calls <code>caseXXX</code> for each class of the model until one returns a non null result; it yields that result.
- * <!-- begin-user-doc -->
- * <!-- end-user-doc -->
- * @return the first non-null result returned by a <code>caseXXX</code> call.
- * @generated
- */
+ @Override
protected T doSwitch(int classifierID, EObject theEObject) {
switch (classifierID) {
case HadoopPackage.HDFS_SERVER: {
@@ -222,6 +207,7 @@
* @see #doSwitch(org.eclipse.emf.ecore.EObject)
* @generated
*/
+ @Override
public T defaultCase(EObject object) {
return null;
}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
index 133b9dd..38c5664 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/InterruptableZooKeeperClient.java
@@ -93,7 +93,10 @@
// Tell HDFS manager that the server timed out
if (logger.isDebugEnabled())
logger.debug("executeWithTimeout(): Server timed out: " + server);
- ZooKeeperManager.INSTANCE.disconnect(server);
+ try {
+ ZooKeeperManager.INSTANCE.disconnect(server);
+			} catch (Throwable t) {
+				// Best effort: ignore failures while disconnecting a server that already timed out.
+			}
throw new InterruptedException();
}
if (data.size() > 0)
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
index 4c36259..87b5cd5 100644
--- a/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/internal/zookeeper/ZooKeeperManager.java
@@ -35,6 +35,8 @@
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.Status;
import org.eclipse.emf.common.util.EList;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
/**
* @author Srimanth Gunturi
@@ -62,11 +64,20 @@
/**
* @param zkServerName
* @param uri
+ * @throws CoreException
+ * @throws InterruptedException
+ * @throws IOException
*/
- public ZooKeeperServer createServer(String zkServerName, String zkServerLocation) {
+ public ZooKeeperServer createServer(String zkServerName, String zkServerLocation) throws CoreException {
ZooKeeperServer zkServer = HadoopFactory.eINSTANCE.createZooKeeperServer();
zkServer.setName(zkServerName);
zkServer.setUri(zkServerLocation);
+ try {
+ ZooKeeperManager.INSTANCE.getClient(zkServer).connect();
+ } catch (Exception e) {
+			logger.error("Error connecting to ZooKeeper server", e);
+ throw new CoreException(new Status(Status.ERROR, Activator.BUNDLE_ID, "Error in creating server",e));
+ }
getServers().add(zkServer);
HadoopManager.INSTANCE.saveServers();
return zkServer;
@@ -74,22 +85,18 @@
/**
* @param r
+ * @throws CoreException
*/
- public void disconnect(ZooKeeperServer server) {
+ public void disconnect(ZooKeeperServer server) throws CoreException {
try {
if (ServerStatus.DISCONNECTED_VALUE != server.getStatusCode()) {
getClient(server).disconnect();
server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
}
- } catch (IOException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- } catch (InterruptedException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- } catch (CoreException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
+ } catch (Exception e) {
+			logger.error("Error in disconnect", e);
+ throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID,
+ "Unable to disconnect.",e));
}
}
@@ -97,8 +104,9 @@
* Provides a ZooKeeper instance using plugin extensions.
*
* @param r
+ * @throws CoreException
*/
- public void reconnect(ZooKeeperServer server) {
+ public void reconnect(ZooKeeperServer server) throws CoreException {
try {
if (logger.isDebugEnabled())
logger.debug("reconnect(): Reconnecting: " + server);
@@ -111,18 +119,11 @@
}
if (logger.isDebugEnabled())
logger.debug("reconnect(): Reconnected: " + server);
- } catch (IOException e) {
+ } catch (Exception e) {
server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
- // TODO Auto-generated catch block
- e.printStackTrace();
- } catch (InterruptedException e) {
- server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
- // TODO Auto-generated catch block
- e.printStackTrace();
- } catch (CoreException e) {
- server.setStatusCode(ServerStatus.DISCONNECTED_VALUE);
- // TODO Auto-generated catch block
- e.printStackTrace();
+			logger.error("Error in reconnect", e);
+ throw new CoreException(new Status(IStatus.ERROR, Activator.BUNDLE_ID,
+ "Unable to reconnect.",e));
}
}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
new file mode 100644
index 0000000..57862ef
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/AbstractHadoopCluster.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.HadoopVersion;
+import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.Platform;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.swt.widgets.Composite;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+public abstract class AbstractHadoopCluster {
+
+ public interface ChangeListener {
+ void notifyChange(ConfProp prop, String propValue);
+ }
+
+ public interface HadoopConfigurationBuilder {
+ void buildControl(Composite panel);
+
+ void notifyChange(ConfProp confProp, String text);
+
+ void setChangeListener(ChangeListener l);
+ }
+
+ private static final Logger logger = Logger.getLogger(AbstractHadoopCluster.class);
+
+ abstract public String getLocationName();
+
+ abstract public void dispose();
+
+ abstract public void storeSettingsToFile(File file) throws IOException;
+
+ abstract public void saveConfiguration(File confDir, String jarFilePath) throws IOException;
+
+ abstract public String getMasterHostName();
+
+ abstract public void setLocationName(String string);
+
+ abstract public void load(AbstractHadoopCluster server);
+
+ abstract public String getConfPropValue(String propName);
+
+ abstract public String getConfPropValue(ConfProp prop);
+
+ abstract public void setConfPropValue(ConfProp prop, String propValue);
+
+ abstract public void setConfPropValue(String propName, String propValue);
+
+ abstract public Iterator<Entry<String, String>> getConfiguration();
+
+ abstract public void purgeJob(IHadoopJob job);
+
+ abstract public void addJobListener(IJobListener jobListener);
+
+ abstract public Collection<? extends IHadoopJob> getJobs();
+
+ abstract public String getState();
+
+ abstract protected boolean loadConfiguration(Map<String, String> configuration);
+
+ abstract public boolean isAvailable() throws CoreException;
+
+ abstract public HadoopVersion getVersion();
+
+ abstract public HadoopConfigurationBuilder getUIConfigurationBuilder();
+
+ public static AbstractHadoopCluster createCluster(File file) throws CoreException, IOException {
+ Map<String, String> configuration = loadXML(file);
+ String version = configuration.get(ConfProp.PI_HADOOP_VERSION.name);
+ AbstractHadoopCluster hadoopCluster = createCluster(version != null ? version : ConfProp.PI_HADOOP_VERSION.defVal);
+ hadoopCluster.loadConfiguration(configuration);
+ return hadoopCluster;
+ }
+
+ public static AbstractHadoopCluster createCluster(String hadoopVersion) throws CoreException {
+ logger.debug("Creating client for version " + hadoopVersion);
+ IConfigurationElement[] elementsFor = Platform.getExtensionRegistry().getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
+ for (IConfigurationElement configElement : elementsFor) {
+ String version = configElement.getAttribute("protocolVersion");
+ if (version.equalsIgnoreCase(hadoopVersion)) {
+ return (AbstractHadoopCluster) configElement.createExecutableExtension("class");
+ }
+ }
+		throw new CoreException(new Status(Status.ERROR, Activator.BUNDLE_ID, "No client found for hadoop version " + hadoopVersion));
+ }
+
+ public static AbstractHadoopCluster createCluster(AbstractHadoopCluster existing) throws CoreException {
+ AbstractHadoopCluster hadoopCluster = createCluster(existing.getVersion().getDisplayName());
+ hadoopCluster.load(existing);
+ return hadoopCluster;
+ }
+
+ protected static Map<String, String> loadXML(File file) {
+ DocumentBuilder builder;
+ Document document;
+ try {
+ builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+ document = builder.parse(file);
+ } catch (ParserConfigurationException e) {
+ e.printStackTrace();
+ return null;
+ } catch (SAXException e) {
+ e.printStackTrace();
+ return null;
+ } catch (IOException e) {
+ e.printStackTrace();
+ return null;
+ }
+ Element root = document.getDocumentElement();
+ if (!"configuration".equals(root.getTagName()))
+ return null;
+ NodeList props = root.getChildNodes();
+ Map<String, String> configuration = new HashMap<String, String>();
+ for (int i = 0; i < props.getLength(); i++) {
+ Node propNode = props.item(i);
+ if (!(propNode instanceof Element))
+ continue;
+ Element prop = (Element) propNode;
+ if (!"property".equals(prop.getTagName()))
+ return null;
+ NodeList fields = prop.getChildNodes();
+ String attr = null;
+ String value = null;
+ for (int j = 0; j < fields.getLength(); j++) {
+ Node fieldNode = fields.item(j);
+ if (!(fieldNode instanceof Element))
+ continue;
+ Element field = (Element) fieldNode;
+ if ("name".equals(field.getTagName()))
+ attr = ((Text) field.getFirstChild()).getData();
+ if ("value".equals(field.getTagName()) && field.hasChildNodes())
+ value = ((Text) field.getFirstChild()).getData();
+ }
+ if (attr != null && value != null)
+ configuration.put(attr, value);
+ }
+ return configuration;
+ }
+
+ /**
+ * @param propName
+ * @return
+ */
+ public ConfProp getConfPropForName(String propName) {
+ return ConfProp.getByName(propName);
+ }
+
+ public String getConfPropName(ConfProp prop) {
+ return prop.name;
+ }
+
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
new file mode 100644
index 0000000..b23adf9
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ConfProp.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public enum ConfProp {
+ /**
+ * Property name for the Hadoop location name
+ */
+ PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+ /**
+ * Property name for the Hadoop Version
+ */
+ PI_HADOOP_VERSION(true, "hadoop.version", "1.1"),
+
+ /**
+ * Property name for the master host name (the Job tracker)
+ */
+ PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+ PI_RESOURCE_MGR_HOST(true, "rm.host", "localhost"),
+
+ PI_JOB_HISTORY_HOST(true, "jobhistory.host", "localhost"),
+
+ /**
+ * Property name for the DFS master host name (the Name node)
+ */
+ PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+ /**
+ * User name to use for Hadoop operations
+ */
+ PI_USER_NAME(true, "user.name", System.getProperty("user.name", "who are you?")),
+
+ /**
+ * Property name for SOCKS proxy activation
+ */
+ PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+ /**
+ * Property name for the SOCKS proxy host
+ */
+ PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+ /**
+ * Property name for the SOCKS proxy port
+ */
+ PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+ /**
+ * TCP port number for the name node
+ */
+ PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+ /**
+ * TCP port number for the job tracker
+ */
+ PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+ PI_RESOURCE_MGR_PORT(true, "rm.port", "8032"),
+
+ PI_JOB_HISTORY_PORT(true, "jobhistory.port", "10020"),
+
+ /**
+ * Are the Map/Reduce and the Distributed FS masters hosted on the same
+ * machine?
+ */
+ PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+ /**
+ * Property name for naming the job tracker (URI). This property is related
+	 * to {@link #PI_JOB_TRACKER_HOST}
+ */
+ JOB_TRACKER_URI(false, "mapred.job.tracker", "localhost:50020"),
+
+ /**
+ * Property name for naming the default file system (URI).
+ */
+ FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+ RM_DEFAULT_URI(false, "yarn.resourcemanager.address", "localhost:8032"),
+
+ JOB_HISTORY_DEFAULT_URI(false, "mapreduce.jobhistory.address", "localhost:10020"),
+
+ /**
+ * Property name for the default socket factory:
+ */
+ SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default", "org.apache.hadoop.net.StandardSocketFactory"),
+
+ /**
+ * Property name for the SOCKS server URI.
+ */
+ SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+ ;
+
+ /**
+ * Map <property name> -> ConfProp
+ */
+ private static Map<String, ConfProp> map;
+
+ private static synchronized void registerProperty(String name, ConfProp prop) {
+
+ if (ConfProp.map == null)
+ ConfProp.map = new HashMap<String, ConfProp>();
+
+ ConfProp.map.put(name, prop);
+ }
+
+ public static ConfProp getByName(String propName) {
+ return map.get(propName);
+ }
+ protected final String name;
+
+ public final String defVal;
+
+ ConfProp(boolean internal, String name, String defVal) {
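+		// Internal (plug-in only) properties are namespaced with a prefix so
+		// they never collide with real Hadoop configuration keys.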
+ if (internal)
+ name = "eclipse.plug-in." + name;
+ this.name = name;
+ this.defVal = defVal;
+
+ ConfProp.registerProperty(name, this);
+ }
+
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
new file mode 100644
index 0000000..82b6d10
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/ErrorMessageDialog.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Error dialog helper
+ */
+public class ErrorMessageDialog {
+
+ public static void display(final String title, final String message) {
+ Display.getDefault().syncExec(new Runnable() {
+
+ public void run() {
+ MessageDialog.openError(Display.getDefault().getActiveShell(), title, message);
+ }
+
+ });
+ }
+
+ public static void display(Exception e) {
+ display("An exception has occured!", "Exception description:\n" + e.getLocalizedMessage());
+ }
+
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
new file mode 100644
index 0000000..e403c57
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopClusterListener.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+/**
+ * Interface for monitoring server changes
+ */
+public interface IHadoopClusterListener {
+ void serverChanged(AbstractHadoopCluster location, int type);
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
new file mode 100644
index 0000000..0b58699
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IHadoopJob.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
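+/**
+ * Interface describing a Map/Reduce job running on a Hadoop cluster
+ */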
+public interface IHadoopJob {
+
+ boolean isCompleted();
+
+ AbstractHadoopCluster getLocation();
+
+ String getJobID();
+
+ void kill();
+
+ String getStatus();
+
+ String getState();
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
new file mode 100644
index 0000000..0af6c9f
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJarModule.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+import java.io.File;
+
+import org.eclipse.jface.operation.IRunnableWithProgress;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public interface IJarModule extends IRunnableWithProgress {
+
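+ /**
+ * @return the name of this module
+ */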
+ String getName();
+
+ /**
+ * Allow the retrieval of the resulting JAR file
+ *
+ * @return the generated JAR file
+ */
+ File getJarFile();
+
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
new file mode 100644
index 0000000..4dc3bc5
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/launch/IJobListener.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.launch;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+public interface IJobListener {
+
+ void jobChanged(IHadoopJob job);
+
+ void jobAdded(IHadoopJob job);
+
+ void jobRemoved(IHadoopJob job);
+
+ void publishStart(IJarModule jar);
+
+ void publishDone(IJarModule jar);
+
+}
diff --git a/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
new file mode 100644
index 0000000..d350def
--- /dev/null
+++ b/org.apache.hdt.core/src/org/apache/hdt/core/natures/MapReduceNature.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.core.natures;
+
+import java.io.File;
+import java.net.URL;
+import java.util.Iterator;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+import org.apache.hdt.core.Activator;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectNature;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.QualifiedName;
+import org.eclipse.jdt.core.IClasspathEntry;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.JavaCore;
+
+/**
+ * Class to configure and deconfigure an Eclipse project with the MapReduce
+ * project nature.
+ */
+
+public class MapReduceNature implements IProjectNature {
+
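+ /**
+ * ID of this project nature, as registered with Eclipse
+ */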
+ public static final String ID = "org.apache.hdt.mrnature";
+
+ private IProject project;
+
+ static Logger log = Logger.getLogger(MapReduceNature.class.getName());
+
+ /**
+ * Configures an Eclipse project as a Map/Reduce project by adding the
+ * Hadoop libraries to a project's classpath.
+ */
+ /*
+ * TODO Versioning connector needed here
+ */
+ public void configure() throws CoreException {
+
+ String hadoopHomePath = project.getPersistentProperty(new QualifiedName(Activator.BUNDLE_ID, "hadoop.runtime.path"));
+ String hadoopVersion = project.getPersistentProperty(new QualifiedName(Activator.BUNDLE_ID, "hadoop.version"));
+
+ AbstractHadoopHomeReader homeReader = AbstractHadoopHomeReader.createReader(hadoopVersion);
+ final List<File> coreJars = homeReader.getHadoopJars(new Path(hadoopHomePath).toFile());
+
+ // Add Hadoop libraries onto classpath
+ IJavaProject javaProject = JavaCore.create(getProject());
+ try {
+ IClasspathEntry[] currentCp = javaProject.getRawClasspath();
+ IClasspathEntry[] newCp = new IClasspathEntry[currentCp.length + coreJars.size()];
+ System.arraycopy(currentCp, 0, newCp, 0, currentCp.length);
+
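+ // Existing entries stay at the front; Hadoop jars are filled in from the end of the new array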
+ final Iterator<File> i = coreJars.iterator();
+ int count = 0;
+ while (i.hasNext()) {
+ final File f = i.next();
+ URL url = f.toURI().toURL();
+ log.finer("hadoop library url.getPath() = " + url.getPath());
+
+ newCp[newCp.length - 1 - count] = JavaCore.newLibraryEntry(new Path(url.getPath()), null, null);
+ count++;
+ }
+
+ javaProject.setRawClasspath(newCp, new NullProgressMonitor());
+ } catch (Exception e) {
+ log.log(Level.SEVERE, "Exception generated in " + this.getClass().getCanonicalName(), e);
+ }
+ }
+
+ /**
+ * Deconfigure a project from MapReduce status. Currently unimplemented.
+ */
+ public void deconfigure() throws CoreException {
+ // TODO Auto-generated method stub
+ }
+
+ /**
+ * Returns the project to which this project nature applies.
+ */
+ public IProject getProject() {
+ return this.project;
+ }
+
+ /**
+ * Sets the project to which this nature applies. Used when instantiating
+ * this project nature runtime.
+ */
+ public void setProject(IProject project) {
+ this.project = project;
+ }
+
+}
diff --git a/org.apache.hdt.dist/pom.xml b/org.apache.hdt.dist/pom.xml
index 7e06e62..ef1af8e 100644
--- a/org.apache.hdt.dist/pom.xml
+++ b/org.apache.hdt.dist/pom.xml
@@ -22,7 +22,7 @@
<parent>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.dist</artifactId>
<name>Apache Hadoop Development Tools Distribution</name>
diff --git a/org.apache.hdt.feature/.classpath b/org.apache.hdt.feature/.classpath
index 4c2b7c4..39b5586 100644
--- a/org.apache.hdt.feature/.classpath
+++ b/org.apache.hdt.feature/.classpath
@@ -5,5 +5,6 @@
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="src" path="/org.apache.hdt.core"/>
<classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
+ <classpathentry kind="src" path="/org.apache.hdt.hadoop2.release"/>
<classpathentry kind="src" path="/org.apache.hdt.ui"/>
-</classpath>
\ No newline at end of file
+</classpath>
diff --git a/org.apache.hdt.feature/.project b/org.apache.hdt.feature/.project
index 017e5f9..6aff5d5 100644
--- a/org.apache.hdt.feature/.project
+++ b/org.apache.hdt.feature/.project
@@ -5,6 +5,7 @@
<projects>
<project>org.apache.hdt.core</project>
<project>org.apache.hdt.hadoop.release</project>
+ <project>org.apache.hdt.hadoop2.release</project>
<project>org.apache.hdt.ui</project>
</projects>
<buildSpec>
diff --git a/org.apache.hdt.feature/feature.xml b/org.apache.hdt.feature/feature.xml
index 6d351e2..9a1253a 100644
--- a/org.apache.hdt.feature/feature.xml
+++ b/org.apache.hdt.feature/feature.xml
@@ -2,7 +2,7 @@
<feature
id="org.apache.hdt.feature"
label="Hadoop Development Tools"
- version="0.0.1.incubating"
+ version="0.0.2.incubating"
provider-name="Apache Software Foundation">
<description url="http://hdt.incubator.apache.org/">
@@ -35,12 +35,20 @@
or implied. See the License for the specific language governing
permissions and limitations under the License.
</license>
-
+
<plugin
id="org.apache.hdt.hadoop.release"
download-size="0"
install-size="0"
- version="0.0.1.incubating"
+ version="0.0.2.incubating"
+ fragment="true"
+ unpack="false"/>
+
+ <plugin
+ id="org.apache.hdt.hadoop2.release"
+ download-size="0"
+ install-size="0"
+ version="0.0.2.incubating"
fragment="true"
unpack="false"/>
@@ -48,14 +56,14 @@
id="org.apache.hdt.ui"
download-size="0"
install-size="0"
- version="0.0.1.incubating"
+ version="0.0.2.incubating"
unpack="false"/>
<plugin
id="org.apache.hdt.core"
download-size="0"
install-size="0"
- version="0.0.1.incubating"
+ version="0.0.2.incubating"
unpack="false"/>
</feature>
diff --git a/org.apache.hdt.feature/pom.xml b/org.apache.hdt.feature/pom.xml
index de6f2dd..19a5a18 100644
--- a/org.apache.hdt.feature/pom.xml
+++ b/org.apache.hdt.feature/pom.xml
@@ -22,7 +22,7 @@
<relativePath>../pom.xml</relativePath>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.feature</artifactId>
diff --git a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
index 1e0d762..0f56f4b 100644
--- a/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.hadoop.release/META-INF/MANIFEST.MF
@@ -2,19 +2,65 @@
Bundle-ManifestVersion: 2
Bundle-Name: Apache Hadoop 0.0.1.qualifier Release Eclipse Plugin
Bundle-SymbolicName: org.apache.hdt.hadoop.release;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.incubating
Bundle-Vendor: Apache Hadoop
-Fragment-Host: org.apache.hdt.core
Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Require-Bundle: org.apache.hdt.core,
+ org.eclipse.core.runtime,
+ org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
+ org.eclipse.core.resources;bundle-version="3.6.0",
+ org.eclipse.swt,
+ org.eclipse.jface
Bundle-ClassPath: .,
- jars/log4j-1.2.15.jar,
- jars/slf4j-api-1.6.1.jar,
+ jars/zookeeper-3.4.5.jar,
jars/slf4j-log4j12-1.6.1.jar,
- jars/commons-configuration-1.6.jar,
- jars/commons-lang-2.4.jar,
- jars/commons-logging-1.1.1.jar,
- jars/hadoop-client-1.1.2.jar,
+ jars/slf4j-api-1.6.1.jar,
+ jars/log4j-1.2.15.jar,
+ jars/xmlenc-0.52.jar,
+ jars/stax-api-1.0-2.jar,
+ jars/stax-api-1.0.1.jar,
+ jars/servlet-api-2.5-6.1.14.jar,
+ jars/servlet-api-2.5-20081211.jar,
+ jars/oro-2.0.8.jar,
+ jars/junit-4.11.jar,
+ jars/jsp-api-2.1-6.1.14.jar,
+ jars/jsp-2.1-6.1.14.jar,
+ jars/jetty-util-6.1.26.jar,
+ jars/jetty-6.1.26.jar,
+ jars/jettison-1.1.jar,
+ jars/jets3t-0.6.1.jar,
+ jars/jersey-server-1.8.jar,
+ jars/jersey-json-1.8.jar,
+ jars/jersey-core-1.8.jar,
+ jars/jaxb-impl-2.2.3-1.jar,
+ jars/jaxb-api-2.2.2.jar,
+ jars/jasper-runtime-5.5.12.jar,
+ jars/jasper-compiler-5.5.12.jar,
+ jars/jackson-xc-1.7.1.jar,
+ jars/jackson-mapper-asl-1.8.8.jar,
+ jars/jackson-jaxrs-1.7.1.jar,
+ jars/jackson-core-asl-1.7.1.jar,
+ jars/hsqldb-1.8.0.10.jar,
+ jars/hamcrest-core-1.3.jar,
jars/hadoop-core-1.1.2.jar,
- jars/hadoop-test-1.1.2.jar,
+ jars/core-3.1.1.jar,
+ jars/commons-net-1.4.1.jar,
+ jars/commons-math-2.1.jar,
+ jars/commons-logging-1.1.1.jar,
+ jars/commons-lang-2.4.jar,
+ jars/commons-io-2.1.jar,
+ jars/commons-httpclient-3.0.1.jar,
+ jars/commons-el-1.0.jar,
+ jars/commons-digester-1.8.jar,
+ jars/commons-configuration-1.6.jar,
+ jars/commons-collections-3.2.1.jar,
+ jars/commons-codec-1.4.jar,
+ jars/commons-cli-1.2.jar,
+ jars/commons-beanutils-core-1.8.0.jar,
+ jars/commons-beanutils-1.7.0.jar,
+ jars/asm-3.1.jar,
+ jars/ant-1.6.5.jar,
+ jars/activation-1.1.jar,
jars/hadoop-tools-1.1.2.jar,
- jars/zookeeper-3.4.5.jar
+ jars/hadoop-test-1.1.2.jar,
+ jars/hadoop-client-1.1.2.jar
diff --git a/org.apache.hdt.hadoop.release/build.properties b/org.apache.hdt.hadoop.release/build.properties
index 6d99810..848ab4a 100644
--- a/org.apache.hdt.hadoop.release/build.properties
+++ b/org.apache.hdt.hadoop.release/build.properties
@@ -19,11 +19,5 @@
output.. = bin/
bin.includes = META-INF/,\
.,\
- fragment.xml,\
- jars/,\
- jars/slf4j-api-1.6.1.jar,\
- jars/slf4j-log4j12-1.6.1.jar,\
- jars/commons-configuration-1.6.jar,\
- jars/commons-lang-2.4.jar,\
- jars/commons-logging-1.1.1.jar,\
- jars/log4j-1.2.15.jar
+ plugin.xml,\
+ jars/
diff --git a/org.apache.hdt.hadoop.release/fragment.xml b/org.apache.hdt.hadoop.release/plugin.xml
similarity index 71%
rename from org.apache.hdt.hadoop.release/fragment.xml
rename to org.apache.hdt.hadoop.release/plugin.xml
index 1b11581..62cb794 100644
--- a/org.apache.hdt.hadoop.release/fragment.xml
+++ b/org.apache.hdt.hadoop.release/plugin.xml
@@ -16,13 +16,13 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-<fragment>
+<plugin>
<extension
point="org.apache.hdt.core.hdfsClient">
<hdfsClient
class="org.apache.hdt.hadoop.release.HDFSClientRelease"
protocol="hdfs"
- protocolVersion="1.1.2.21">
+ protocolVersion="1.1">
</hdfsClient>
</extension>
<extension
@@ -32,5 +32,20 @@
protocolVersion="3.4.5">
</zookeeperClient>
</extension>
+ <extension
+ point="org.apache.hdt.core.hadoopCluster">
+ <hadoopCluster
+ class="org.apache.hdt.hadoop.release.HadoopCluster"
+ protocolVersion="1.1">
+ </hadoopCluster>
+ </extension>
+ <extension
+ point="org.apache.hdt.core.hadoopHomeReader">
+ <hadoopHomeReader
+ class="org.apache.hdt.hadoop.release.HadoopHomeReader"
+ protocolVersion="1.1">
+ </hadoopHomeReader>
+ </extension>
+
-</fragment>
+</plugin>
diff --git a/org.apache.hdt.hadoop.release/pom.xml b/org.apache.hdt.hadoop.release/pom.xml
index 279d131..e08e28c 100644
--- a/org.apache.hdt.hadoop.release/pom.xml
+++ b/org.apache.hdt.hadoop.release/pom.xml
@@ -22,11 +22,17 @@
<relativePath>../pom.xml</relativePath>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.hadoop.release</artifactId>
<packaging>eclipse-plugin</packaging>
<name>Apache Hadoop Devlopment Tools Assembly</name>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-core</artifactId>
+ </dependency>
+ </dependencies>
<build>
<sourceDirectory>src</sourceDirectory>
<plugins>
@@ -36,6 +42,20 @@
<version>2.8</version>
<executions>
<execution>
+ <id>copy-dependencies</id>
+ <phase>initialize</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <excludeScope>system</excludeScope>
+ <outputDirectory>${basedir}/jars</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>false</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ </configuration>
+ </execution>
+ <execution>
<id>copy</id>
<phase>initialize</phase>
<goals>
@@ -50,11 +70,6 @@
</artifactItem>
<artifactItem>
<groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-core</artifactId>
- <overWrite>false</overWrite>
- </artifactItem>
- <artifactItem>
- <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-test</artifactId>
<overWrite>false</overWrite>
</artifactItem>
@@ -83,21 +98,6 @@
<artifactId>slf4j-log4j12</artifactId>
<overWrite>false</overWrite>
</artifactItem>
- <artifactItem>
- <groupId>commons-configuration</groupId>
- <artifactId>commons-configuration</artifactId>
- <overWrite>false</overWrite>
- </artifactItem>
- <artifactItem>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <overWrite>false</overWrite>
- </artifactItem>
- <artifactItem>
- <groupId>commons-logging</groupId>
- <artifactId>commons-logging</artifactId>
- <overWrite>false</overWrite>
- </artifactItem>
</artifactItems>
<outputDirectory>${basedir}/jars</outputDirectory>
<overWriteReleases>false</overWriteReleases>
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
new file mode 100644
index 0000000..c64f757
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopCluster.java
@@ -0,0 +1,547 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.ServiceLoader;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.HadoopVersion;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Representation of a Hadoop location, i.e. of the master node (NameNode,
+ * JobTracker).
+ *
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using Putty or <tt>ssh -D &lt;port&gt;
+ * &lt;host&gt;</tt>)
+ *
+ * <p>
+ * <em> TODO </em>
+ * <li>Disable the updater if a location becomes unreachable or fails for too
+ * long
+ * <li>Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster extends AbstractHadoopCluster {
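+ /**
+ * Executor used to run connectivity checks (see {@link #isAvailable()})
+ */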
+ private ExecutorService service = Executors.newSingleThreadExecutor();
+
+ /**
+ * Frequency of location status observations expressed as the delay in ms
+ * between each observation
+ *
+ * TODO Add a preference parameter for this
+ */
+ protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+ /**
+ * Background job that regularly polls the location for job status updates
+ */
+ public class LocationStatusUpdater extends Job {
+
+ JobClient client = null;
+
+ /**
+ * Setup the updater
+ */
+ public LocationStatusUpdater() {
+ super("Map/Reduce location status updater");
+ this.setSystem(true);
+ }
+
+ /* @inheritDoc */
+ @Override
+ protected IStatus run(IProgressMonitor monitor) {
+ if (client == null) {
+ try {
+ client = HadoopCluster.this.getJobClient();
+
+ } catch (IOException ioe) {
+ client = null;
+ return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot connect to the Map/Reduce location: "
+ + HadoopCluster.this.getLocationName(), ioe);
+ }
+ }
+ Thread current = Thread.currentThread();
+ ClassLoader oldLoader = current.getContextClassLoader();
+ try {
+ current.setContextClassLoader(HadoopCluster.class.getClassLoader());
+ // Set of all known existing Job IDs we want fresh info of
+ Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
+
+ JobStatus[] jstatus = client.jobsToComplete();
+ jstatus = jstatus == null ? new JobStatus[0] : jstatus;
+ for (final JobStatus status : jstatus) {
+
+ final JobID jobId = status.getJobID();
+ missingJobIds.remove(jobId);
+
+ HadoopJob hJob;
+ synchronized (HadoopCluster.this.runningJobs) {
+ hJob = runningJobs.get(jobId);
+ if (hJob == null) {
+ // Unknown job, create an entry
+ final RunningJob running = client.getJob(jobId);
+ ServiceLoader<FileSystem> serviceLoader = ServiceLoader.load(FileSystem.class);
+ for (FileSystem fs : serviceLoader) {
+ System.out.println(fs.getClass().getProtectionDomain().getCodeSource().getLocation());
+ }
+ hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
+ newJob(hJob);
+ }
+ }
+
+ // Update HadoopJob with fresh info
+ updateJob(hJob, status);
+ }
+
+ // Ask explicitly for fresh info for these Job IDs
+ for (JobID jobId : missingJobIds) {
+ HadoopJob hJob = runningJobs.get(jobId);
+ if (!hJob.isCompleted())
+ updateJob(hJob, null);
+ }
+
+ } catch (IOException ioe) {
+ client = null;
+ return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
+ ioe);
+ }finally {
+ current.setContextClassLoader(oldLoader);
+ }
+
+ // Schedule the next observation
+ schedule(STATUS_OBSERVATION_DELAY);
+
+ return Status.OK_STATUS;
+ }
+
+ /**
+ * Stores and makes the new job available
+ *
+ * @param data
+ */
+ private void newJob(final HadoopJob data) {
+ runningJobs.put(data.jobId, data);
+
+ Display.getDefault().asyncExec(new Runnable() {
+ public void run() {
+ fireJobAdded(data);
+ }
+ });
+ }
+
+ /**
+ * Updates the status of a job
+ *
+ * @param job
+ * the job to update
+ */
+ private void updateJob(final HadoopJob job, JobStatus status) {
+ job.update(status);
+
+ Display.getDefault().asyncExec(new Runnable() {
+ public void run() {
+ fireJobChanged(job);
+ }
+ });
+ }
+
+ }
+
+ static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+ /**
+ * Hadoop configuration of the location. Also contains specific parameters
+ * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+ */
+ private Configuration conf;
+
+ /**
+ * Jobs listeners
+ */
+ private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+ /**
+ * Jobs running on this location. The keys of this map are the Job IDs.
+ */
+ private transient Map<JobID, HadoopJob> runningJobs = Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+ /**
+ * Status updater for this location
+ */
+ private LocationStatusUpdater statusUpdater;
+
+ // state and status - transient
+ private transient String state = "";
+
+ /**
+ * Creates a new default Hadoop location
+ */
+ public HadoopCluster() {
+ this.conf = new Configuration();
+ this.addPluginConfigDefaultProperties();
+ }
+
+ /**
+ * Create a new Hadoop location by copying an already existing one.
+ *
+ * @param existing
+ * the location to copy
+ */
+ public HadoopCluster(HadoopCluster existing) {
+ this();
+ this.load(existing);
+ }
+
+ public void addJobListener(IJobListener l) {
+ jobListeners.add(l);
+ }
+
+ public void dispose() {
+ // TODO close DFS connections?
+ }
+
+ /**
+ * List all elements that should be present in the Server window (all
+ * servers and all jobs running on each server)
+ *
+ * @return collection of jobs for this location
+ */
+ public Collection<? extends IHadoopJob> getJobs() {
+ startStatusUpdater();
+ return this.runningJobs.values();
+ }
+
+ /**
+ * Remove the given job from the currently running jobs map
+ *
+ * @param job
+ * the job to remove
+ */
+ public void purgeJob(final IHadoopJob job) {
+ runningJobs.remove(JobID.forName(job.getJobID()));
+ Display.getDefault().asyncExec(new Runnable() {
+ public void run() {
+ fireJobRemoved(job);
+ }
+ });
+ }
+
+ /**
+ * Returns an iterator over the {@link Configuration} entries defining
+ * this location.
+ *
+ * @return iterator over the location configuration entries
+ */
+ public Iterator<Entry<String, String>> getConfiguration() {
+ return this.conf.iterator();
+ }
+
+ /**
+ * @return the conf
+ */
+ public Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * Gets a Hadoop configuration property value
+ *
+ * @param prop
+ * the configuration property
+ * @return the property value
+ */
+ public String getConfPropValue(ConfProp prop) {
+ return conf.get(getConfPropName(prop));
+ }
+
+ /**
+ * Gets a Hadoop configuration property value
+ *
+ * @param propName
+ * the property name
+ * @return the property value
+ */
+ public String getConfPropValue(String propName) {
+ return this.conf.get(propName);
+ }
+
+ public String getLocationName() {
+ return getConfPropValue(ConfProp.PI_LOCATION_NAME);
+ }
+
+ /**
+ * Returns the master host name of the Hadoop location (the Job tracker)
+ *
+ * @return the host name of the Job tracker
+ */
+ public String getMasterHostName() {
+ return getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+ }
+
+ public String getState() {
+ return state;
+ }
+
+ /**
+ * Overwrite this location with the given existing location
+ *
+ * @param existing
+ * the existing location
+ */
+ public void load(AbstractHadoopCluster existing) {
+ this.conf = new Configuration(((HadoopCluster) existing).conf);
+ }
+
+ protected boolean loadConfiguration(Map<String, String> configuration) {
+ if (configuration == null)
+ return false;
+
+ Configuration newConf = new Configuration(this.conf);
+ for (Entry<String, String> entry : configuration.entrySet()) {
+ newConf.set(entry.getKey(), entry.getValue());
+ }
+
+ this.conf = newConf;
+ return true;
+ }
+
+ /**
+ * Sets a Hadoop configuration property value
+ *
+ * @param prop
+ * the property
+ * @param propValue
+ * the property value
+ */
+ public void setConfPropValue(ConfProp prop, String propValue) {
+ if (propValue != null)
+ setConfPropValue(getConfPropName(prop), propValue);
+ }
+
+ @Override
+ public void setConfPropValue(String propName, String propValue) {
+ conf.set(propName, propValue);
+ }
+
+ public void setLocationName(String newName) {
+ setConfPropValue(ConfProp.PI_LOCATION_NAME, newName);
+ }
+
+ /**
+ * Write this location's settings to the given file
+ *
+ * @param file
+ * the destination file
+ * @throws IOException
+ */
+ public void storeSettingsToFile(File file) throws IOException {
+ FileOutputStream fos = new FileOutputStream(file);
+ try {
+ this.conf.writeXml(fos);
+ fos.close();
+ fos = null;
+ } finally {
+ IOUtils.closeStream(fos);
+ }
+
+ }
+
+ /* @inheritDoc */
+ @Override
+ public String toString() {
+ return this.getLocationName();
+ }
+
+ /**
+ * Fill the configuration with valid default values
+ */
+ private void addPluginConfigDefaultProperties() {
+ for (ConfProp prop : ConfProp.values()) {
+ conf.set(getConfPropName(prop), prop.defVal);
+ }
+ }
+
+ /**
+ * Starts the location status updater
+ */
+ private synchronized void startStatusUpdater() {
+ if (statusUpdater == null) {
+ statusUpdater = new LocationStatusUpdater();
+ statusUpdater.schedule();
+ }
+ }
+
+ /*
+ * Rewrite of the connecting and tunneling to the Hadoop location
+ */
+
+ /**
+ * Provides access to the default file system of this location.
+ *
+ * @return a {@link FileSystem}
+ */
+ public FileSystem getDFS() throws IOException {
+ return FileSystem.get(this.conf);
+ }
+
+ /**
+ * Provides access to the Job tracking system of this location
+ *
+ * @return a {@link JobClient}
+ */
+ public JobClient getJobClient() throws IOException {
+ JobConf jconf = new JobConf(this.conf);
+ return new JobClient(jconf);
+ }
+
+ /*
+ * Listeners handling
+ */
+
+ protected void fireJarPublishDone(IJarModule jar) {
+ for (IJobListener listener : jobListeners) {
+ listener.publishDone(jar);
+ }
+ }
+
+ protected void fireJarPublishStart(IJarModule jar) {
+ for (IJobListener listener : jobListeners) {
+ listener.publishStart(jar);
+ }
+ }
+
+ protected void fireJobAdded(HadoopJob job) {
+ for (IJobListener listener : jobListeners) {
+ listener.jobAdded(job);
+ }
+ }
+
+ protected void fireJobRemoved(IHadoopJob job) {
+ for (IJobListener listener : jobListeners) {
+ listener.jobRemoved(job);
+ }
+ }
+
+ protected void fireJobChanged(HadoopJob job) {
+ for (IJobListener listener : jobListeners) {
+ listener.jobChanged(job);
+ }
+ }
+
+ @Override
+ public void saveConfiguration(File confDir, String jarFilePath) throws IOException {
+ // Prepare the Hadoop configuration
+ JobConf conf = new JobConf(this.conf);
+ conf.setJar(jarFilePath);
+ // Write it to the disk file
+ File coreSiteFile = new File(confDir, "core-site.xml");
+ File mapredSiteFile = new File(confDir, "mapred-site.xml");
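+ // mapred-site.xml is written as a byte-for-byte copy of core-site.xml below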
+ FileOutputStream fos = new FileOutputStream(coreSiteFile);
+ FileInputStream fis = null;
+ try {
+ conf.writeXml(fos);
+ fos.close();
+ fos = new FileOutputStream(mapredSiteFile);
+ fis = new FileInputStream(coreSiteFile);
+ IOUtils.copyBytes(new BufferedInputStream(fis), fos, 4096);
+ } finally {
+ IOUtils.closeStream(fos);
+ IOUtils.closeStream(fis);
+ }
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hdt.core.launch.AbstractHadoopCluster#isAvailable()
+ */
+ @Override
+ public boolean isAvailable() throws CoreException {
+ Callable<JobClient> task= new Callable<JobClient>() {
+
+ @Override
+ public JobClient call() throws Exception {
+ return getJobClient();
+ }
+ };
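+ // Give the cluster at most 5 seconds to answer before reporting it unavailable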
+ Future<JobClient> jobClientFuture = service.submit(task);
+ try {
+ JobClient jobClient = jobClientFuture.get(5, TimeUnit.SECONDS);
+ return jobClient != null;
+ } catch (Exception e) {
+ throw new CoreException(new Status(Status.ERROR,
+ Activator.BUNDLE_ID, "unable to connect to server", e));
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hdt.core.launch.AbstractHadoopCluster#getVersion()
+ */
+ @Override
+ public HadoopVersion getVersion() {
+ return HadoopVersion.Version1;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hdt.core.launch.AbstractHadoopCluster#getUIConfigurationBuilder()
+ */
+ @Override
+ public HadoopConfigurationBuilder getUIConfigurationBuilder() {
+ return new HadoopV1ConfigurationBuilder(this);
+ }
+}
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java
new file mode 100644
index 0000000..ef0952d
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopHomeReader.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+import org.eclipse.core.runtime.Path;
+
+public class HadoopHomeReader extends AbstractHadoopHomeReader {
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.AbstractHadoopHomeReader#validateHadoopHome(java.
+ * io.File)
+ */
+ @Override
+ public boolean validateHadoopHome(File location) {
+ FilenameFilter gotHadoopJar = new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ return (name.startsWith("hadoop") && name.endsWith(".jar") && (name.indexOf("test") == -1) && (name.indexOf("examples") == -1));
+ }
+ };
+ return location.exists() && (location.list(gotHadoopJar).length > 0);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.AbstractHadoopHomeReader#getHadoopJars(java.io.File)
+ */
+ @Override
+ public List<File> getHadoopJars(File hadoopHome) {
+ File hadoopLib = new File(hadoopHome, "lib");
+
+ final ArrayList<File> coreJars = new ArrayList<File>();
+ coreJars.addAll(getJarFiles(hadoopHome));
+ coreJars.addAll(getJarFiles(hadoopLib));
+ return coreJars;
+ }
+
+ private ArrayList<File> getJarFiles(File directory) {
+ FilenameFilter jarFileFilter = new FilenameFilter() {
+ @Override
+ public boolean accept(File dir, String name) {
+ return name.endsWith(".jar");
+ }
+ };
+ final ArrayList<File> jars = new ArrayList<File>();
+ for (String jarFileName : directory.list(jarFileFilter)) {
+ jars.add(new File(directory, jarFileName));
+ }
+ return jars;
+ }
+
+}
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
new file mode 100644
index 0000000..9200674
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopJob.java
@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob implements IHadoopJob {
+
+ /**
+ * Enum representation of a Job state
+ */
+ public enum JobState {
+ PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+ final int state;
+
+ JobState(int state) {
+ this.state = state;
+ }
+
+ static JobState ofInt(int state) {
+ if (state == JobStatus.PREP) {
+ return PREPARE;
+ } else if (state == JobStatus.RUNNING) {
+ return RUNNING;
+ } else if (state == JobStatus.FAILED) {
+ return FAILED;
+ } else if (state == JobStatus.SUCCEEDED) {
+ return SUCCEEDED;
+ } else {
+ return null;
+ }
+ }
+ }
+
+ /**
+ * Location this Job runs on
+ */
+ private final HadoopCluster location;
+
+ /**
+ * Unique identifier of this Job
+ */
+ final JobID jobId;
+
+ /**
+ * Status representation of a running job. This actually contains a
+ * reference to a JobClient. Its methods might block.
+ */
+ RunningJob running;
+
+ /**
+ * Last polled status
+ *
+ * @deprecated should apparently not be used
+ */
+ JobStatus status;
+
+ /**
+ * Last polled counters
+ */
+ Counters counters;
+
+ /**
+ * Job Configuration
+ */
+ JobConf jobConf = null;
+
+ boolean completed = false;
+
+ boolean successful = false;
+
+ boolean killed = false;
+
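+ // Task counts and progress figures, refreshed by update(JobStatus)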
+ int totalMaps;
+
+ int totalReduces;
+
+ int completedMaps;
+
+ int completedReduces;
+
+ float mapProgress;
+
+ float reduceProgress;
+
+ /**
+ * Constructor for a Hadoop job representation
+ *
+ * @param location the cluster this job runs on
+ * @param id the job identifier
+ * @param running the running job handle
+ * @param status the last polled job status
+ */
+ public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+ this.location = location;
+ this.jobId = id;
+ this.running = running;
+
+ loadJobFile();
+
+ update(status);
+ }
+
+ /**
+ * Try to locate and load the JobConf file for this job so to get more
+ * details on the job (number of maps and of reduces)
+ */
+ private void loadJobFile() {
+ try {
+ String jobFile = getJobFile();
+ FileSystem fs = location.getDFS();
+ File tmp = File.createTempFile(getJobID().toString(), ".xml");
+ if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location.getConf())) {
+ this.jobConf = new JobConf(tmp.toString());
+
+ this.totalMaps = jobConf.getNumMapTasks();
+ this.totalReduces = jobConf.getNumReduceTasks();
+ }
+
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+ }
+
+ /* @inheritDoc */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+ result = prime * result + ((location == null) ? 0 : location.hashCode());
+ return result;
+ }
+
+ /* @inheritDoc */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (!(obj instanceof HadoopJob))
+ return false;
+ final HadoopJob other = (HadoopJob) obj;
+ if (jobId == null) {
+ if (other.jobId != null)
+ return false;
+ } else if (!jobId.equals(other.jobId))
+ return false;
+ if (location == null) {
+ if (other.location != null)
+ return false;
+ } else if (!location.equals(other.location))
+ return false;
+ return true;
+ }
+
+ /**
+ * Get the running status of the Job (@see {@link JobStatus}).
+ *
+ * @return the name of the current job state
+ */
+ public String getState() {
+ if (this.completed) {
+ if (this.successful) {
+ return JobState.SUCCEEDED.toString();
+ } else {
+ return JobState.FAILED.toString();
+ }
+ } else {
+ return JobState.RUNNING.toString();
+ }
+ // return JobState.ofInt(this.status.getRunState());
+ }
+
+ /**
+ * @return the job identifier as a string
+ */
+ public String getJobID() {
+ return this.jobId.toString();
+ }
+
+ /**
+ * @return the cluster this job runs on
+ */
+ public AbstractHadoopCluster getLocation() {
+ return this.location;
+ }
+
+ /**
+ * @return whether the job has completed
+ */
+ public boolean isCompleted() {
+ return this.completed;
+ }
+
+ /**
+ * @return the job name
+ */
+ public String getJobName() {
+ return this.running.getJobName();
+ }
+
+ /**
+ * @return the path of the job configuration file
+ */
+ public String getJobFile() {
+ return this.running.getJobFile();
+ }
+
+ /**
+ * Return the tracking URL for this Job.
+ *
+ * @return string representation of the tracking URL for this Job
+ */
+ public String getTrackingURL() {
+ return this.running.getTrackingURL();
+ }
+
+ /**
+ * Returns a string representation of this job status
+ *
+ * @return string representation of this job status
+ */
+ public String getStatus() {
+
+ StringBuffer s = new StringBuffer();
+
+ s.append("Maps : " + completedMaps + "/" + totalMaps);
+ s.append(" (" + mapProgress + ")");
+ s.append(" Reduces : " + completedReduces + "/" + totalReduces);
+ s.append(" (" + reduceProgress + ")");
+
+ return s.toString();
+ }
+
+ /**
+ * Update this job status according to the given JobStatus
+ *
+ * @param status
+ */
+ void update(JobStatus status) {
+ this.status = status;
+ try {
+ this.counters = running.getCounters();
+ this.completed = running.isComplete();
+ this.successful = running.isSuccessful();
+ this.mapProgress = running.mapProgress();
+ this.reduceProgress = running.reduceProgress();
+ // running.getTaskCompletionEvents(fromEvent);
+
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+
+ this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+ this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+ }
+
+ /**
+ * Print this job counters (for debugging purpose)
+ */
+ void printCounters() {
+ System.out.printf("New Job:\n", counters);
+ for (String groupName : counters.getGroupNames()) {
+ Counters.Group group = counters.getGroup(groupName);
+ System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+ for (Counters.Counter counter : group) {
+ System.out.printf("\t\t%s: %s\n", counter.getDisplayName(), counter.getCounter());
+ }
+ }
+ System.out.printf("\n");
+ }
+
+ /**
+ * Kill this job
+ */
+ public void kill() {
+ try {
+ this.running.killJob();
+ this.killed = true;
+
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Print this job status (for debugging purpose)
+ */
+ public void display() {
+ System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+ System.out.printf("Configuration file: %s\n", getJobID());
+ System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+ System.out.printf("Completion: map: %f reduce %f\n", 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+ System.out.println("Job total maps = " + totalMaps);
+ System.out.println("Job completed maps = " + completedMaps);
+ System.out.println("Map percentage complete = " + mapProgress);
+ System.out.println("Job total reduces = " + totalReduces);
+ System.out.println("Job completed reduces = " + completedReduces);
+ System.out.println("Reduce percentage complete = " + reduceProgress);
+ System.out.flush();
+ }
+
+}
diff --git a/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java
new file mode 100644
index 0000000..fb5eace
--- /dev/null
+++ b/org.apache.hdt.hadoop.release/src/org/apache/hdt/hadoop/release/HadoopV1ConfigurationBuilder.java
@@ -0,0 +1,690 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop.release;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder;
+import org.apache.hdt.core.launch.ConfProp;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
+class HadoopV1ConfigurationBuilder implements HadoopConfigurationBuilder {
+
+ private AbstractHadoopCluster location;
+ private TabMediator mediator;
+ private ChangeListener changelistener;
+
+ public HadoopV1ConfigurationBuilder(AbstractHadoopCluster location) {
+ this.location = location;
+ }
+
+ @Override
+ public void buildControl(Composite panel) {
+ mediator = new TabMediator(panel);
+ GridData gdata = new GridData(GridData.FILL_BOTH);
+ gdata.horizontalSpan = 2;
+ mediator.folder.setLayoutData(gdata);
+ }
+
+ private class TabMediator {
+ TabFolder folder;
+ private Set<ChangeListener> tabs = new HashSet<ChangeListener>();
+
+ TabMediator(Composite parent) {
+ folder = new TabFolder(parent, SWT.NONE);
+ tabs.add(new TabMain(this));
+ tabs.add(new TabAdvanced(this));
+ }
+
+ /**
+ * Implements change notifications from any tab: update the location
+ * state and other tabs
+ *
+ * @param source
+ * origin of the notification (one of the tabs)
+ * @param prop
+ * modified property
+ * @param propValue
+ * new value
+ */
+ void notifyChange(ChangeListener source, final ConfProp prop, final String propValue) {
+ // Ignore notification when no change
+ String oldValue = location.getConfPropValue(prop);
+ if ((oldValue != null) && oldValue.equals(propValue))
+ return;
+
+ location.setConfPropValue(prop, propValue);
+ changelistener.notifyChange(prop, propValue);
+ this.fireChange(source, prop, propValue);
+
+ /*
+ * Now we deal with dependencies between settings
+ */
+ final String jobTrackerHost = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+ final String jobTrackerPort = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_PORT);
+ final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
+ final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
+ final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+ final String jobTrackerURI = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
+ final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+ final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
+ final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+ final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
+ final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ switch (prop) {
+ case PI_JOB_TRACKER_HOST: {
+ if (colocate)
+ notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
+ String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+ notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+ break;
+ }
+ case PI_JOB_TRACKER_PORT: {
+ String newJobTrackerURI = String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+ notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+ break;
+ }
+ case PI_NAME_NODE_HOST: {
+ String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+ notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+ // Break colocation if someone forces the DFS Master
+ if (!colocate && !nameNodeHost.equals(jobTrackerHost))
+ notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+ break;
+ }
+ case PI_NAME_NODE_PORT: {
+ String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+ notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+ break;
+ }
+ case PI_SOCKS_PROXY_HOST: {
+ String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+ notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+ break;
+ }
+ case PI_SOCKS_PROXY_PORT: {
+ String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+ notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+ break;
+ }
+ case JOB_TRACKER_URI: {
+ String[] strs = jobTrackerURI.split(":", 2);
+ String host = strs[0];
+ String port = (strs.length == 2) ? strs[1] : "";
+ notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
+ notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
+ break;
+ }
+ case FS_DEFAULT_URI: {
+ try {
+ URI uri = new URI(fsDefaultURI);
+ if (uri.getScheme().equals("hdfs")) {
+ String host = uri.getHost();
+ String port = Integer.toString(uri.getPort());
+ notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+ notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+ }
+ } catch (URISyntaxException use) {
+ // Ignore the update!
+ }
+ break;
+ }
+ case SOCKS_SERVER: {
+ String[] strs = socksServerURI.split(":", 2);
+ String host = strs[0];
+ String port = (strs.length == 2) ? strs[1] : "";
+ notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+ notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+ break;
+ }
+ case PI_COLOCATE_MASTERS: {
+ if (colocate)
+ notifyChange(null, ConfProp.PI_NAME_NODE_HOST, jobTrackerHost);
+ break;
+ }
+ case PI_SOCKS_PROXY_ENABLE: {
+ if (socksProxyEnable) {
+ notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.SocksSocketFactory");
+ } else {
+ notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
+ }
+ break;
+ }
+ }
+ }
+ });
+
+ }
+
+ /**
+ * Change notifications on properties (by name). A property might not be
+ * reflected as a ConfProp enum. If it is, the notification is forwarded
+ * to the ConfProp notifyChange method. If not, it is processed here.
+ *
+ * @param source
+ * @param propName
+ * @param propValue
+ */
+ void notifyChange(ChangeListener source, String propName, String propValue) {
+ ConfProp prop = location.getConfPropForName(propName);
+ if (prop != null)
+ notifyChange(source, prop, propValue);
+ else
+ location.setConfPropValue(propName, propValue);
+ }
+
+ /**
+ * Broadcast a property change to all registered tabs. If a tab is
+ * identified as the source of the change, this tab will not be
+ * notified.
+ *
+ * @param source
+ * TODO
+ * @param prop
+ * @param value
+ */
+ private void fireChange(ChangeListener source, ConfProp prop, String value) {
+ for (ChangeListener tab : tabs) {
+ if (tab != source)
+ tab.notifyChange(prop, value);
+ }
+ }
+
+ }
+
+ /**
+ * Create a SWT Text component for the given {@link ConfProp} text
+ * configuration property.
+ *
+ * @param listener
+ * @param parent
+ * @param prop
+ * @return
+ */
+ private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
+ Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+ GridData data = new GridData(GridData.FILL_HORIZONTAL);
+ text.setLayoutData(data);
+ text.setData("hProp", prop);
+ text.setText(location.getConfPropValue(prop));
+ text.addModifyListener(listener);
+ return text;
+ }
+
+ /**
+ * Create a SWT Checked Button component for the given {@link ConfProp}
+ * boolean configuration property.
+ *
+ * @param listener
+ * @param parent
+ * @param prop
+ * @return
+ */
+ private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
+ Button button = new Button(parent, SWT.CHECK);
+ button.setText(text);
+ button.setData("hProp", prop);
+ button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
+ button.addSelectionListener(listener);
+ return button;
+ }
+
+ /**
+ * Create an editor entry for the given configuration property. The editor
+ * is a (Label, Text) pair.
+ *
+ * @param listener
+ * the listener to trigger on property change
+ * @param parent
+ * the SWT parent container
+ * @param prop
+ * the property to create an editor for
+ * @param labelText
+ * a label (null will defaults to the property name)
+ *
+ * @return a SWT Text field
+ */
+ private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
+ Label label = new Label(parent, SWT.NONE);
+ if (labelText == null)
+ labelText = location.getConfPropName(prop);
+ label.setText(labelText);
+ return createConfText(listener, parent, prop);
+ }
+
+ /**
+ * Create an editor entry for the given configuration name
+ *
+ * @param listener
+ * the listener to trigger on property change
+ * @param parent
+ * the SWT parent container
+ * @param propName
+ * the name of the property to create an editor for
+ * @param labelText
+ * a label (null defaults to the property name)
+ *
+ * @return a SWT Text field
+ */
+ private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
+
+ ConfProp prop = location.getConfPropForName(propName);
+ if (prop != null)
+ return createConfLabelText(listener, parent, prop, labelText);
+
+ Label label = new Label(parent, SWT.NONE);
+ if (labelText == null)
+ labelText = propName;
+ label.setText(labelText);
+
+ Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+ GridData data = new GridData(GridData.FILL_HORIZONTAL);
+ text.setLayoutData(data);
+ text.setData("hPropName", propName);
+ text.setText(location.getConfPropValue(propName));
+ text.addModifyListener(listener);
+
+ return text;
+ }
+
+ /**
+ * Main parameters of the Hadoop location:
+ * <ul>
+ * <li>host and port of the Map/Reduce master (Job tracker)</li>
+ * <li>host and port of the DFS master (Name node)</li>
+ * <li>SOCKS proxy</li>
+ * </ul>
+ */
+ private class TabMain implements ChangeListener, ModifyListener, SelectionListener {
+
+ TabMediator mediator;
+
+ Text textJTHost;
+
+ Text textNNHost;
+
+ Button colocateMasters;
+
+ Text textJTPort;
+
+ Text textNNPort;
+
+ Text userName;
+
+ Button useSocksProxy;
+
+ Text socksProxyHost;
+
+ Text socksProxyPort;
+
+ private Group groupMR;
+
+ TabMain(TabMediator mediator) {
+ this.mediator = mediator;
+ TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+ tab.setText("General");
+ tab.setToolTipText("General location parameters");
+ tab.setControl(createControl(mediator.folder));
+ }
+
+ private Control createControl(Composite parent) {
+
+ Composite panel = new Composite(parent, SWT.FILL);
+ panel.setLayout(new GridLayout(2, false));
+
+ GridData data;
+
+ /*
+ * Map/Reduce group
+ */
+ {
+ groupMR = new Group(panel, SWT.SHADOW_NONE);
+ groupMR.setText("Map/Reduce Master Node");
+ groupMR.setToolTipText("Address of the Map/Reduce Master node.");
+ GridLayout layout = new GridLayout(2, false);
+ groupMR.setLayout(layout);
+ data = new GridData();
+ data.verticalAlignment = SWT.FILL;
+ data.horizontalAlignment = SWT.CENTER;
+ data.widthHint = 250;
+ groupMR.setLayoutData(data);
+
+ // Job Tracker host
+ Label label = new Label(groupMR, SWT.NONE);
+ label.setText("Host:");
+ data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+ label.setLayoutData(data);
+
+ textJTHost = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
+ data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+ textJTHost.setLayoutData(data);
+
+ // Job Tracker port
+ label = new Label(groupMR, SWT.NONE);
+ label.setText("Port:");
+ data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+ label.setLayoutData(data);
+
+ textJTPort = createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
+ data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+ textJTPort.setLayoutData(data);
+ }
+
+ /*
+ * DFS group
+ */
+ {
+ Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+ groupDFS.setText("DFS Master");
+ groupDFS.setToolTipText("Address of the Distributed FileSystem " + "master node (the Name Node).");
+ GridLayout layout = new GridLayout(2, false);
+ groupDFS.setLayout(layout);
+ data = new GridData();
+ data.horizontalAlignment = SWT.CENTER;
+ data.widthHint = 250;
+ groupDFS.setLayoutData(data);
+
+ colocateMasters = createConfCheckButton(this, groupDFS, ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
+ data = new GridData();
+ data.horizontalSpan = 2;
+ colocateMasters.setLayoutData(data);
+
+ // Name Node host
+ Label label = new Label(groupDFS, SWT.NONE);
+ data = new GridData();
+ label.setText("Host:");
+ label.setLayoutData(data);
+
+ textNNHost = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+ // Name Node port
+ label = new Label(groupDFS, SWT.NONE);
+ data = new GridData();
+ label.setText("Port:");
+ label.setLayoutData(data);
+
+ textNNPort = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+ }
+
+ {
+ Composite subpanel = new Composite(panel, SWT.FILL);
+ subpanel.setLayout(new GridLayout(2, false));
+ data = new GridData();
+ data.horizontalSpan = 2;
+ data.horizontalAlignment = SWT.FILL;
+ subpanel.setLayoutData(data);
+
+ userName = createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME, "&User name:");
+ }
+
+ // SOCKS proxy group
+ {
+ Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+ groupSOCKS.setText("SOCKS proxy");
+ groupSOCKS.setToolTipText("Address of the SOCKS proxy to use " + "to connect to the infrastructure.");
+ GridLayout layout = new GridLayout(2, false);
+ groupSOCKS.setLayout(layout);
+ data = new GridData();
+ data.horizontalAlignment = SWT.CENTER;
+ data.horizontalSpan = 2;
+ data.widthHint = 250;
+ groupSOCKS.setLayoutData(data);
+
+ useSocksProxy = createConfCheckButton(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+ data = new GridData();
+ data.horizontalSpan = 2;
+ useSocksProxy.setLayoutData(data);
+
+ // SOCKS proxy host
+ Label label = new Label(groupSOCKS, SWT.NONE);
+ data = new GridData();
+ label.setText("Host:");
+ label.setLayoutData(data);
+
+ socksProxyHost = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+ // SOCKS proxy port
+ label = new Label(groupSOCKS, SWT.NONE);
+ data = new GridData();
+ label.setText("Port:");
+ label.setLayoutData(data);
+
+ socksProxyPort = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+ }
+
+ // Update the state of all widgets according to the current values!
+ reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+ reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+ reloadConfProp(ConfProp.PI_HADOOP_VERSION);
+
+ return panel;
+ }
+
+ /**
+ * Reload the given configuration property value
+ *
+ * @param prop
+ */
+ private void reloadConfProp(ConfProp prop) {
+ this.notifyChange(prop, location.getConfPropValue(prop));
+ }
+
+ public void notifyChange(ConfProp prop, String propValue) {
+ switch (prop) {
+ case PI_JOB_TRACKER_HOST: {
+ textJTHost.setText(propValue);
+ break;
+ }
+ case PI_JOB_TRACKER_PORT: {
+ textJTPort.setText(propValue);
+ break;
+ }
+ case PI_USER_NAME: {
+ userName.setText(propValue);
+ break;
+ }
+ case PI_COLOCATE_MASTERS: {
+ if (colocateMasters != null) {
+ boolean colocate = propValue.equalsIgnoreCase("yes");
+ colocateMasters.setSelection(colocate);
+ if (textNNHost != null) {
+ textNNHost.setEnabled(!colocate);
+ }
+ }
+ break;
+ }
+ case PI_NAME_NODE_HOST: {
+ textNNHost.setText(propValue);
+ break;
+ }
+ case PI_NAME_NODE_PORT: {
+ textNNPort.setText(propValue);
+ break;
+ }
+ case PI_SOCKS_PROXY_ENABLE: {
+ if (useSocksProxy != null) {
+ boolean useProxy = propValue.equalsIgnoreCase("yes");
+ useSocksProxy.setSelection(useProxy);
+ if (socksProxyHost != null)
+ socksProxyHost.setEnabled(useProxy);
+ if (socksProxyPort != null)
+ socksProxyPort.setEnabled(useProxy);
+ }
+ break;
+ }
+ case PI_SOCKS_PROXY_HOST: {
+ socksProxyHost.setText(propValue);
+ break;
+ }
+ case PI_SOCKS_PROXY_PORT: {
+ socksProxyPort.setText(propValue);
+ break;
+ }
+ }
+ }
+
+ /* @inheritDoc */
+ public void modifyText(ModifyEvent e) {
+ final Text text = (Text) e.widget;
+ final ConfProp prop = (ConfProp) text.getData("hProp");
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ mediator.notifyChange(TabMain.this, prop, text.getText());
+ }
+ });
+ }
+
+ /* @inheritDoc */
+ public void widgetDefaultSelected(SelectionEvent e) {
+ this.widgetSelected(e);
+ }
+
+ /* @inheritDoc */
+ public void widgetSelected(SelectionEvent e) {
+ final Button button = (Button) e.widget;
+ final ConfProp prop = (ConfProp) button.getData("hProp");
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ // We want to receive the update also!
+ mediator.notifyChange(null, prop, button.getSelection() ? "yes" : "no");
+ }
+ });
+ }
+
+ }
+
+ private class TabAdvanced implements ChangeListener, ModifyListener {
+ TabMediator mediator;
+ private Composite panel;
+ private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+ TabAdvanced(TabMediator mediator) {
+ this.mediator = mediator;
+ TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+ tab.setText("Advanced parameters");
+ tab.setToolTipText("Access to advanced Hadoop parameters");
+ tab.setControl(createControl(mediator.folder));
+
+ }
+
+ private Control createControl(Composite parent) {
+ ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
+ panel = buildPanel(sc);
+ sc.setContent(panel);
+ sc.setExpandHorizontal(true);
+ sc.setExpandVertical(true);
+ sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+ return sc;
+ }
+
+ private Composite buildPanel(Composite parent) {
+ Composite panel = new Composite(parent, SWT.NONE);
+ GridLayout layout = new GridLayout();
+ layout.numColumns = 2;
+ layout.makeColumnsEqualWidth = false;
+ panel.setLayout(layout);
+ panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
+
+ // Sort by property name
+ SortedMap<String, String> map = new TreeMap<String, String>();
+ Iterator<Entry<String, String>> it = location.getConfiguration();
+ while (it.hasNext()) {
+ Entry<String, String> entry = it.next();
+ map.put(entry.getKey(), entry.getValue());
+ }
+
+ for (Entry<String, String> entry : map.entrySet()) {
+ Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+ textMap.put(entry.getKey(), text);
+ }
+ return panel;
+ }
+
+ public void notifyChange(ConfProp prop, final String propValue) {
+ Text text = textMap.get(location.getConfPropName(prop));
+ // The advanced tab may not have an editor for every ConfProp
+ if (text != null)
+ text.setText(propValue);
+ }
+
+ public void modifyText(ModifyEvent e) {
+ final Text text = (Text) e.widget;
+ Object hProp = text.getData("hProp");
+ final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+ Object hPropName = text.getData("hPropName");
+ final String propName = (hPropName != null) ? (String) hPropName : null;
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ if (prop != null)
+ mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+ else
+ mediator.notifyChange(TabAdvanced.this, propName, text.getText());
+ }
+ });
+ }
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder#update(org.apache.hdt.core.launch.ConfProp, java.lang.String)
+ */
+ @Override
+ public void notifyChange(ConfProp confProp, String text) {
+ mediator.notifyChange(null, ConfProp.PI_LOCATION_NAME, text);
+
+ }
+ /* (non-Javadoc)
+ * @see org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder#setChangeListener(org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener)
+ */
+ @Override
+ public void setChangeListener(ChangeListener l) {
+ changelistener = l;
+ }
+
+}
\ No newline at end of file
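The tabs in the location dialog above never talk to each other directly; every edit flows through the TabMediator, which applies it once and re-broadcasts it to the other tabs. Below is a minimal, self-contained sketch of that pattern; the names are illustrative stand-ins, not the real HDT types.

    // Minimal sketch of the change-mediator pattern used by the dialog above.
    // Tab and Mediator are illustrative stand-ins for the real HDT types.
    import java.util.ArrayList;
    import java.util.List;

    interface Tab {
        void notifyChange(String prop, String value);
    }

    class Mediator {
        private final List<Tab> tabs = new ArrayList<Tab>();

        void add(Tab tab) {
            tabs.add(tab);
        }

        // Broadcast to every tab except the one that originated the change,
        // mirroring fireChange() in the source above.
        void fireChange(Tab source, String prop, String value) {
            for (Tab tab : tabs) {
                if (tab != source)
                    tab.notifyChange(prop, value);
            }
        }
    }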
diff --git a/org.apache.hdt.hadoop2.release/.classpath b/org.apache.hdt.hadoop2.release/.classpath
new file mode 100644
index 0000000..d59ac75
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.classpath
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
+ <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+ <classpathentry kind="src" path="src/"/>
+ <classpathentry exported="true" kind="lib" path="jars/activation-1.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/aopalliance-1.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/asm-3.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/avro-1.7.4.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-beanutils-1.7.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-beanutils-core-1.8.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-cli-1.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-codec-1.4.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-collections-3.2.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-compress-1.4.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-configuration-1.6.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-digester-1.8.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-el-1.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-httpclient-3.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-io-2.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-lang-2.4.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-logging-1.1.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-math-2.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/commons-net-3.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/gmbal-api-only-3.0.0-b023.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/grizzly-framework-2.1.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/grizzly-http-2.1.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/grizzly-http-server-2.1.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/grizzly-http-servlet-2.1.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/grizzly-rcm-2.1.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/guava-11.0.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/guice-3.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/guice-servlet-3.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-annotations-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-auth-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-client-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-common-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-hdfs-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-app-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-common-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-core-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-jobclient-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-mapreduce-client-shuffle-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-api-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-client-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-common-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-server-common-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hadoop-yarn-server-tests-2.2.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/hamcrest-core-1.3.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jackson-core-asl-1.8.8.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jackson-jaxrs-1.8.3.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jackson-mapper-asl-1.8.8.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jackson-xc-1.8.3.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jasper-compiler-5.5.23.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jasper-runtime-5.5.23.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/javax.inject-1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/javax.servlet-3.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/javax.servlet-api-3.0.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jaxb-api-2.2.2.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jaxb-impl-2.2.3-1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-client-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-core-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-grizzly2-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-guice-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-json-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-server-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-test-framework-core-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jersey-test-framework-grizzly2-1.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jets3t-0.6.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jettison-1.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jetty-6.1.26.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jetty-util-6.1.26.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jsch-0.1.42.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jsp-api-2.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/jsr305-1.3.9.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/junit-4.11.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/log4j-1.2.15.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/management-api-3.0.0-b012.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/netty-3.6.2.Final.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/paranamer-2.3.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/protobuf-java-2.5.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/servlet-api-2.5.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/slf4j-api-1.6.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/slf4j-log4j12-1.6.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/snappy-java-1.0.4.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/stax-api-1.0.1.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/xmlenc-0.52.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/xz-1.0.jar"/>
+ <classpathentry exported="true" kind="lib" path="jars/zookeeper-3.4.5.jar"/>
+ <classpathentry kind="output" path="target/classes"/>
+</classpath>
diff --git a/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 0000000..99f26c0
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,2 @@
+eclipse.preferences.version=1
+encoding/<project>=UTF-8
diff --git a/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000..c537b63
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,7 @@
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.source=1.6
diff --git a/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs
new file mode 100644
index 0000000..f897a7f
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/.settings/org.eclipse.m2e.core.prefs
@@ -0,0 +1,4 @@
+activeProfiles=
+eclipse.preferences.version=1
+resolveWorkspaceProjects=true
+version=1
diff --git a/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
new file mode 100644
index 0000000..3a6e69f
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/META-INF/MANIFEST.MF
@@ -0,0 +1,98 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: Apache Hadoop2 Release Eclipse Plugin
+Bundle-SymbolicName: org.apache.hdt.hadoop2.release;singleton:=true
+Bundle-Version: 0.0.2.incubating
+Bundle-Vendor: Apache Hadoop
+Bundle-RequiredExecutionEnvironment: JavaSE-1.6
+Require-Bundle: org.apache.hdt.core,
+ org.eclipse.core.runtime,
+ org.eclipse.core.filesystem;bundle-version="1.3.0";visibility:=reexport,
+ org.eclipse.core.resources;bundle-version="3.6.0",
+ org.eclipse.swt,
+ org.eclipse.jface
+Bundle-ClassPath: .,
+ jars/activation-1.1.jar,
+ jars/aopalliance-1.0.jar,
+ jars/asm-3.1.jar,
+ jars/avro-1.7.4.jar,
+ jars/commons-beanutils-1.7.0.jar,
+ jars/commons-beanutils-core-1.8.0.jar,
+ jars/commons-cli-1.2.jar,
+ jars/commons-codec-1.4.jar,
+ jars/commons-collections-3.2.1.jar,
+ jars/commons-compress-1.4.1.jar,
+ jars/commons-configuration-1.6.jar,
+ jars/commons-digester-1.8.jar,
+ jars/commons-el-1.0.jar,
+ jars/commons-httpclient-3.1.jar,
+ jars/commons-io-2.1.jar,
+ jars/commons-lang-2.4.jar,
+ jars/commons-logging-1.1.1.jar,
+ jars/commons-math-2.1.jar,
+ jars/commons-net-3.1.jar,
+ jars/gmbal-api-only-3.0.0-b023.jar,
+ jars/grizzly-framework-2.1.2.jar,
+ jars/grizzly-http-2.1.2.jar,
+ jars/grizzly-http-server-2.1.2.jar,
+ jars/grizzly-http-servlet-2.1.2.jar,
+ jars/grizzly-rcm-2.1.2.jar,
+ jars/guava-11.0.2.jar,
+ jars/guice-3.0.jar,
+ jars/guice-servlet-3.0.jar,
+ jars/hadoop-annotations-2.2.0.jar,
+ jars/hadoop-auth-2.2.0.jar,
+ jars/hadoop-client-2.2.0.jar,
+ jars/hadoop-common-2.2.0.jar,
+ jars/hadoop-hdfs-2.2.0.jar,
+ jars/hadoop-mapreduce-client-app-2.2.0.jar,
+ jars/hadoop-mapreduce-client-common-2.2.0.jar,
+ jars/hadoop-mapreduce-client-core-2.2.0.jar,
+ jars/hadoop-mapreduce-client-jobclient-2.2.0.jar,
+ jars/hadoop-mapreduce-client-shuffle-2.2.0.jar,
+ jars/hadoop-yarn-api-2.2.0.jar,
+ jars/hadoop-yarn-client-2.2.0.jar,
+ jars/hadoop-yarn-common-2.2.0.jar,
+ jars/hadoop-yarn-server-common-2.2.0.jar,
+ jars/hadoop-yarn-server-tests-2.2.0.jar,
+ jars/hamcrest-core-1.3.jar,
+ jars/jackson-core-asl-1.8.8.jar,
+ jars/jackson-jaxrs-1.8.3.jar,
+ jars/jackson-mapper-asl-1.8.8.jar,
+ jars/jackson-xc-1.8.3.jar,
+ jars/jasper-compiler-5.5.23.jar,
+ jars/jasper-runtime-5.5.23.jar,
+ jars/javax.inject-1.jar,
+ jars/javax.servlet-3.1.jar,
+ jars/javax.servlet-api-3.0.1.jar,
+ jars/jaxb-api-2.2.2.jar,
+ jars/jaxb-impl-2.2.3-1.jar,
+ jars/jersey-client-1.9.jar,
+ jars/jersey-core-1.9.jar,
+ jars/jersey-grizzly2-1.9.jar,
+ jars/jersey-guice-1.9.jar,
+ jars/jersey-json-1.9.jar,
+ jars/jersey-server-1.9.jar,
+ jars/jersey-test-framework-core-1.9.jar,
+ jars/jersey-test-framework-grizzly2-1.9.jar,
+ jars/jets3t-0.6.1.jar,
+ jars/jettison-1.1.jar,
+ jars/jetty-6.1.26.jar,
+ jars/jetty-util-6.1.26.jar,
+ jars/jsch-0.1.42.jar,
+ jars/jsp-api-2.1.jar,
+ jars/jsr305-1.3.9.jar,
+ jars/junit-4.11.jar,
+ jars/log4j-1.2.15.jar,
+ jars/management-api-3.0.0-b012.jar,
+ jars/netty-3.6.2.Final.jar,
+ jars/paranamer-2.3.jar,
+ jars/protobuf-java-2.5.0.jar,
+ jars/servlet-api-2.5.jar,
+ jars/slf4j-api-1.6.1.jar,
+ jars/slf4j-log4j12-1.6.1.jar,
+ jars/snappy-java-1.0.4.1.jar,
+ jars/stax-api-1.0.1.jar,
+ jars/xmlenc-0.52.jar,
+ jars/xz-1.0.jar,
+ jars/zookeeper-3.4.5.jar
diff --git a/org.apache.hdt.hadoop2.release/build.properties b/org.apache.hdt.hadoop2.release/build.properties
new file mode 100644
index 0000000..848ab4a
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/build.properties
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+ .,\
+ plugin.xml,\
+ jars/
diff --git a/org.apache.hdt.hadoop.release/fragment.xml b/org.apache.hdt.hadoop2.release/plugin.xml
similarity index 65%
copy from org.apache.hdt.hadoop.release/fragment.xml
copy to org.apache.hdt.hadoop2.release/plugin.xml
index 1b11581..2b14915 100644
--- a/org.apache.hdt.hadoop.release/fragment.xml
+++ b/org.apache.hdt.hadoop2.release/plugin.xml
@@ -16,21 +16,27 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-<fragment>
+<plugin>
+<extension
+ point="org.apache.hdt.core.hadoopCluster" >
+ <hadoopCluster
+ class="org.apache.hdt.hadoop2.release.HadoopCluster"
+ protocolVersion="2.2">
+ </hadoopCluster>
+ </extension>
<extension
point="org.apache.hdt.core.hdfsClient">
<hdfsClient
- class="org.apache.hdt.hadoop.release.HDFSClientRelease"
+ class="org.apache.hdt.hadoop2.release.HDFSClientRelease"
protocol="hdfs"
- protocolVersion="1.1.2.21">
+ protocolVersion="2.2">
</hdfsClient>
</extension>
<extension
- point="org.apache.hdt.core.zookeeperClient">
- <zookeeperClient
- class="org.apache.hdt.hadoop.release.ZooKeeperClientRelease"
- protocolVersion="3.4.5">
- </zookeeperClient>
+ point="org.apache.hdt.core.hadoopHomeReader">
+ <hadoopHomeReader
+ class="org.apache.hdt.hadoop2.release.HadoopHomeReader"
+ protocolVersion="2.2">
+ </hadoopHomeReader>
</extension>
-
-</fragment>
+</plugin>
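The plugin.xml above contributes a Hadoop 2.2 implementation to three extension points declared by org.apache.hdt.core. The sketch below shows how a consumer could resolve the hadoopCluster contribution through the standard Eclipse extension registry; the actual lookup code in org.apache.hdt.core is not part of this diff, so treat this as illustrative only.

    // Illustrative lookup via the Eclipse extension registry; the real HDT
    // consumer code is not shown in this diff.
    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.IConfigurationElement;
    import org.eclipse.core.runtime.Platform;

    public class ClusterExtensionLookup {
        public static Object findCluster(String wantedVersion) throws CoreException {
            IConfigurationElement[] elements = Platform.getExtensionRegistry()
                    .getConfigurationElementsFor("org.apache.hdt.core.hadoopCluster");
            for (IConfigurationElement element : elements) {
                // Matches protocolVersion="2.2" in the plugin.xml above
                if (wantedVersion.equals(element.getAttribute("protocolVersion")))
                    return element.createExecutableExtension("class");
            }
            return null;
        }
    }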
diff --git a/org.apache.hdt.hadoop2.release/pom.xml b/org.apache.hdt.hadoop2.release/pom.xml
new file mode 100644
index 0000000..74db93b
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/pom.xml
@@ -0,0 +1,120 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <relativePath>../pom.xml</relativePath>
+ <groupId>org.apache.hdt</groupId>
+ <artifactId>hdt.master</artifactId>
+ <version>0.0.2.incubating</version>
+ </parent>
+ <artifactId>org.apache.hdt.hadoop2.release</artifactId>
+ <packaging>eclipse-plugin</packaging>
+ <name>Apache Hadoop2 Development Tools Assembly</name>
+
+ <properties>
+ <hadoop2.version>2.2.0</hadoop2.version>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-common</artifactId>
+ <version>${hadoop2.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-core</artifactId>
+ <version>${hadoop2.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-client</artifactId>
+ <version>${hadoop2.version}</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <sourceDirectory>src</sourceDirectory>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <version>2.8</version>
+ <executions>
+ <execution>
+ <id>copy-dependencies</id>
+ <phase>initialize</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <excludeScope>system</excludeScope>
+ <outputDirectory>${basedir}/jars</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>false</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ </configuration>
+ </execution>
+ <execution>
+ <id>copy</id>
+ <phase>initialize</phase>
+ <goals>
+ <goal>copy</goal>
+ </goals>
+ <configuration>
+ <artifactItems>
+ <artifactItem>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <overWrite>false</overWrite>
+ </artifactItem>
+ <artifactItem>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <version>${hadoop2.version}</version>
+ <overWrite>false</overWrite>
+ </artifactItem>
+ <artifactItem>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <overWrite>false</overWrite>
+ </artifactItem>
+ <artifactItem>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <overWrite>false</overWrite>
+ </artifactItem>
+ <artifactItem>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <overWrite>false</overWrite>
+ </artifactItem>
+ </artifactItems>
+ <outputDirectory>${basedir}/jars</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>false</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java
new file mode 100644
index 0000000..72874da
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HDFSClientRelease.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop2.release;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hdt.core.hdfs.ResourceInformation;
+import org.apache.log4j.Logger;
+
+/**
+ * HDFS Client for Hadoop version 2.2.0.
+ *
+ * @author Srimanth Gunturi
+ */
+public class HDFSClientRelease extends org.apache.hdt.core.hdfs.HDFSClient {
+
+ private static Logger logger = Logger.getLogger(HDFSClientRelease.class);
+ private Configuration config;
+
+ public HDFSClientRelease() {
+ config = new Configuration();
+ }
+
+ private ResourceInformation getResourceInformation(FileStatus fileStatus) {
+ ResourceInformation fi = new ResourceInformation();
+ fi.setFolder(fileStatus.isDir());
+ fi.setGroup(fileStatus.getGroup());
+ fi.setLastAccessedTime(fileStatus.getAccessTime());
+ fi.setLastModifiedTime(fileStatus.getModificationTime());
+ fi.setName(fileStatus.getPath().getName());
+ fi.setOwner(fileStatus.getOwner());
+ fi.setPath(fileStatus.getPath().getParent() == null ? "/" : fileStatus.getPath().getParent().toString());
+ fi.setReplicationFactor(fileStatus.getReplication());
+ fi.setSize(fileStatus.getLen());
+ FsPermission fsPermission = fileStatus.getPermission();
+ updatePermissions(fi.getUserPermissions(), fsPermission.getUserAction());
+ updatePermissions(fi.getGroupPermissions(), fsPermission.getGroupAction());
+ updatePermissions(fi.getOtherPermissions(), fsPermission.getOtherAction());
+ return fi;
+ }
+
+ private void updatePermissions(ResourceInformation.Permissions permissions, FsAction action) {
+ permissions.read = action.implies(FsAction.READ);
+ permissions.write = action.implies(FsAction.WRITE);
+ permissions.execute = action.implies(FsAction.EXECUTE);
+ }
+
+ protected FileSystem createFS(URI uri, String user) throws IOException, InterruptedException{
+ if(user==null)
+ return FileSystem.get(uri, config);
+ return FileSystem.get(uri, config, user);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.hdt.core.hdfs.HDFSClient#getResource(java.net.URI)
+ */
+ @Override
+ public ResourceInformation getResourceInformation(URI uri, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ FileStatus fileStatus = null;
+ ResourceInformation fi = null;
+ try {
+ fileStatus = fs.getFileStatus(path);
+ fi = getResourceInformation(fileStatus);
+ } catch (FileNotFoundException fne) {
+ logger.info(fne.getMessage());
+ logger.debug(fne.getMessage(), fne);
+ }
+ return fi;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.hdt.core.hdfs.HDFSClient#setResource(java.net.URI,
+ * org.apache.hdt.core.hdfs.ResourceInformation)
+ */
+ @Override
+ public void setResourceInformation(URI uri, ResourceInformation information, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ if (!information.isFolder()) {
+ fs.setTimes(path, information.getLastModifiedTime(), information.getLastAccessedTime());
+ }
+ if (information.getOwner() != null || information.getGroup() != null)
+ fs.setOwner(path, information.getOwner(), information.getGroup());
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.hdfs.HDFSClient#listResources(java.net.URI)
+ */
+ @Override
+ public List<ResourceInformation> listResources(URI uri, String user) throws IOException, InterruptedException {
+ List<ResourceInformation> ris = null;
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ FileStatus[] listStatus = fs.listStatus(path);
+ if (listStatus != null) {
+ ris = new ArrayList<ResourceInformation>();
+ for (FileStatus ls : listStatus) {
+ ris.add(getResourceInformation(ls));
+ }
+ }
+ return ris;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+ * org.eclipse.core.runtime.IProgressMonitor)
+ */
+ @Override
+ public InputStream openInputStream(URI uri, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ FSDataInputStream open = fs.open(path);
+ return open;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+ * org.eclipse.core.runtime.IProgressMonitor)
+ */
+ @Override
+ public OutputStream createOutputStream(URI uri, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ FSDataOutputStream outputStream = fs.create(path);
+ return outputStream;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.hdfs.HDFSClient#openInputStream(java.net.URI,
+ * org.eclipse.core.runtime.IProgressMonitor)
+ */
+ @Override
+ public OutputStream openOutputStream(URI uri, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ // TODO. Temporary fix till Issue#3 is fixed.
+ FSDataOutputStream outputStream = fs.create(path);
+ return outputStream;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.hdt.core.hdfs.HDFSClient#mkdirs(java.net.URI,
+ * org.eclipse.core.runtime.IProgressMonitor)
+ */
+ @Override
+ public boolean mkdirs(URI uri, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ return fs.mkdirs(path);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.hdt.core.hdfs.HDFSClient#delete(java.net.URI,
+ * org.eclipse.core.runtime.IProgressMonitor)
+ */
+ @Override
+ public void delete(URI uri, String user) throws IOException, InterruptedException {
+ FileSystem fs = createFS(uri, user);
+ Path path = new Path(uri.getPath());
+ fs.delete(path, true);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.hdfs.HDFSClient#getDefaultUserAndGroupIds()
+ */
+ @Override
+ public List<String> getDefaultUserAndGroupIds() throws IOException {
+ List<String> idList = new ArrayList<String>();
+ UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+ idList.add(currentUser.getShortUserName());
+ String[] groupIds = currentUser.getGroupNames();
+ if (groupIds != null) {
+ for (String groupId : groupIds) {
+ idList.add(groupId);
+ }
+ }
+ return idList;
+ }
+
+}
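HDFSClientRelease wraps the Hadoop 2 FileSystem API behind the version-neutral org.apache.hdt.core.hdfs.HDFSClient contract. A usage sketch under assumed values follows: the hdfs://localhost:8020 URI and the "hdfs" user are placeholders, and the ResourceInformation getters are assumed to mirror the setters used above.

    // Usage sketch; URI, user name and ResourceInformation getters are assumptions.
    import java.net.URI;
    import java.util.List;
    import org.apache.hdt.core.hdfs.ResourceInformation;

    public class ListHdfsRoot {
        public static void main(String[] args) throws Exception {
            HDFSClientRelease client = new HDFSClientRelease();
            List<ResourceInformation> entries =
                    client.listResources(new URI("hdfs://localhost:8020/"), "hdfs");
            if (entries != null) {
                for (ResourceInformation ri : entries)
                    System.out.println((ri.isFolder() ? "d " : "- ") + ri.getName());
            }
        }
    }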
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
new file mode 100644
index 0000000..d18679a
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopCluster.java
@@ -0,0 +1,537 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.io.BufferedInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.Activator;
+import org.apache.hdt.core.HadoopVersion;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.ConfProp;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Representation of a Hadoop location, i.e. the master node (NameNode,
+ * JobTracker).
+ *
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using PuTTY or <tt>ssh -D &lt;port&gt;
+ * &lt;host&gt;</tt>).
+ *
+ * <p>
+ * <em> TODO </em>
+ * <li>Disable the updater if a location becomes unreachable or fails for too
+ * long
+ * <li>Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster extends AbstractHadoopCluster {
+ private ExecutorService service= Executors.newSingleThreadExecutor();
+
+ /**
+ * Frequency of location status observations expressed as the delay in ms
+ * between each observation
+ *
+ * TODO Add a preference parameter for this
+ */
+ protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+ /**
+ * Background job that periodically refreshes the status of this location.
+ */
+ public class LocationStatusUpdater extends Job {
+
+ JobClient client = null;
+
+ /**
+ * Setup the updater
+ */
+ public LocationStatusUpdater() {
+ super("Map/Reduce location status updater");
+ this.setSystem(true);
+ }
+
+ /* @inheritDoc */
+ @Override
+ protected IStatus run(IProgressMonitor monitor) {
+ if (client == null) {
+ try {
+ client = HadoopCluster.this.getJobClient();
+
+ } catch (IOException ioe) {
+ client = null;
+ return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot connect to the Map/Reduce location: "
+ + HadoopCluster.this.getLocationName(), ioe);
+ }
+ }
+ Thread current = Thread.currentThread();
+ ClassLoader oldLoader = current.getContextClassLoader();
+ try {
+ current.setContextClassLoader(HadoopCluster.class.getClassLoader());
+ // Set of all known existing Job IDs we want fresh info of
+ Set<JobID> missingJobIds = new HashSet<JobID>(runningJobs.keySet());
+
+ JobStatus[] jstatus = client.jobsToComplete();
+ jstatus = jstatus == null ? new JobStatus[0] : jstatus;
+ for (JobStatus status : jstatus) {
+
+ JobID jobId = status.getJobID();
+ missingJobIds.remove(jobId);
+
+ HadoopJob hJob;
+ synchronized (HadoopCluster.this.runningJobs) {
+ hJob = runningJobs.get(jobId);
+ if (hJob == null) {
+ // Unknown job, create an entry
+ RunningJob running = client.getJob(jobId);
+ hJob = new HadoopJob(HadoopCluster.this, jobId, running, status);
+ newJob(hJob);
+ }
+ }
+
+ // Update HadoopJob with fresh infos
+ updateJob(hJob, status);
+ }
+
+ // Ask explicitly for fresh info for these Job IDs
+ for (JobID jobId : missingJobIds) {
+ HadoopJob hJob = runningJobs.get(jobId);
+ if (!hJob.isCompleted())
+ updateJob(hJob, null);
+ }
+
+ } catch (IOException ioe) {
+ client = null;
+ return new Status(Status.ERROR, Activator.BUNDLE_ID, 0, "Cannot retrieve running Jobs on location: " + HadoopCluster.this.getLocationName(),
+ ioe);
+ } finally {
+ current.setContextClassLoader(oldLoader);
+ }
+
+
+ // Schedule the next observation
+ schedule(STATUS_OBSERVATION_DELAY);
+
+ return Status.OK_STATUS;
+ }
+
+ /**
+ * Stores and makes the new job available.
+ *
+ * @param data the new job
+ */
+ private void newJob(final HadoopJob data) {
+ runningJobs.put(data.jobId, data);
+
+ Display.getDefault().asyncExec(new Runnable() {
+ public void run() {
+ fireJobAdded(data);
+ }
+ });
+ }
+
+ /**
+ * Updates the status of a job
+ *
+ * @param job
+ * the job to update
+ */
+ private void updateJob(final HadoopJob job, JobStatus status) {
+ job.update(status);
+
+ Display.getDefault().asyncExec(new Runnable() {
+ public void run() {
+ fireJobChanged(job);
+ }
+ });
+ }
+
+ }
+
+ static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+ /**
+ * Hadoop configuration of the location. Also contains specific parameters
+ * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+ */
+ private Configuration conf;
+
+ /**
+ * Jobs listeners
+ */
+ private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+ /**
+ * Jobs running on this location. The keys of this map are the Job IDs.
+ */
+ private transient Map<JobID, HadoopJob> runningJobs = Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+ /**
+ * Status updater for this location
+ */
+ private LocationStatusUpdater statusUpdater;
+
+ // state and status - transient
+ private transient String state = "";
+
+ /**
+ * Creates a new default Hadoop location
+ */
+ public HadoopCluster() {
+ this.conf = new Configuration();
+ this.addPluginConfigDefaultProperties();
+ conf.set("mapreduce.framework.name", "yarn");
+ }
+
+ /**
+ * Create a new Hadoop location by copying an already existing one.
+ *
+ * @param existing
+ * the location to copy
+ */
+ public HadoopCluster(HadoopCluster existing) {
+ this();
+ this.load(existing);
+ }
+
+ public void addJobListener(IJobListener l) {
+ jobListeners.add(l);
+ }
+
+ public void dispose() {
+ // TODO close DFS connections?
+ }
+
+ /**
+ * List all elements that should be present in the Server window (all
+ * servers and all jobs running on each server)
+ *
+ * @return collection of jobs for this location
+ */
+ public Collection<? extends IHadoopJob> getJobs() {
+ startStatusUpdater();
+ return this.runningJobs.values();
+ }
+
+ /**
+ * Remove the given job from the currently running jobs map
+ *
+ * @param job
+ * the job to remove
+ */
+ public void purgeJob(final IHadoopJob job) {
+ runningJobs.remove(JobID.forName(job.getJobID()));
+ Display.getDefault().asyncExec(new Runnable() {
+ public void run() {
+ fireJobRemoved(job);
+ }
+ });
+ }
+
+ /**
+ * Returns the {@link Configuration} defining this location.
+ *
+ * @return the location configuration
+ */
+ public Iterator<Entry<String, String>> getConfiguration() {
+ return this.conf.iterator();
+ }
+
+ /**
+ * @return the conf
+ */
+ public Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * Gets a Hadoop configuration property value
+ *
+ * @param prop
+ * the configuration property
+ * @return the property value
+ */
+ public String getConfPropValue(ConfProp prop) {
+ String confPropName = getConfPropName(prop);
+ return conf.get(confPropName);
+ }
+
+ /**
+ * Gets a Hadoop configuration property value
+ *
+ * @param propName
+ * the property name
+ * @return the property value
+ */
+ public String getConfPropValue(String propName) {
+ return this.conf.get(propName);
+ }
+
+ public String getLocationName() {
+ return getConfPropValue(ConfProp.PI_LOCATION_NAME);
+ }
+
+ /**
+ * Returns the master host name of the Hadoop location (the ResourceManager)
+ *
+ * @return the host name of the ResourceManager
+ */
+ public String getMasterHostName() {
+ return getConfPropValue(ConfProp.PI_RESOURCE_MGR_HOST);
+ }
+
+ public String getState() {
+ return state;
+ }
+
+ /**
+ * Overwrite this location with the given existing location
+ *
+ * @param existing
+ * the existing location
+ */
+ public void load(AbstractHadoopCluster existing) {
+ this.conf = new Configuration(((HadoopCluster) existing).conf);
+ }
+
+ protected boolean loadConfiguration(Map<String, String> configuration) {
+ Configuration newConf = new Configuration(this.conf);
+ if (configuration == null)
+ return false;
+ for (Entry<String, String> entry : configuration.entrySet()) {
+ newConf.set(entry.getKey(), entry.getValue());
+ }
+
+
+ this.conf = newConf;
+ return true;
+ }
+
+ /**
+ * Sets a Hadoop configuration property value
+ *
+ * @param prop
+ * the property
+ * @param propValue
+ * the property value
+ */
+ public void setConfPropValue(ConfProp prop, String propValue) {
+ if (propValue != null)
+ setConfPropValue(getConfPropName(prop), propValue);
+ }
+
+ @Override
+ public void setConfPropValue(String propName, String propValue) {
+ conf.set(propName, propValue);
+ }
+
+ public void setLocationName(String newName) {
+ setConfPropValue(ConfProp.PI_LOCATION_NAME, newName);
+ }
+
+ /**
+ * Write this location's settings to the given file
+ *
+ * @param file
+ * the destination file
+ * @throws IOException
+ */
+ public void storeSettingsToFile(File file) throws IOException {
+ FileOutputStream fos = new FileOutputStream(file);
+ try {
+ this.conf.writeXml(fos);
+ fos.close();
+ fos = null;
+ } finally {
+ IOUtils.closeStream(fos);
+ }
+
+ }
+
+ /* @inheritDoc */
+ @Override
+ public String toString() {
+ return this.getLocationName();
+ }
+
+ /**
+ * Fill the configuration with valid default values
+ */
+ private void addPluginConfigDefaultProperties() {
+ for (ConfProp prop : ConfProp.values()) {
+ conf.set(getConfPropName(prop), prop.defVal);
+ }
+ }
+
+ /**
+ * Starts the location status updater
+ */
+ private synchronized void startStatusUpdater() {
+ if (statusUpdater == null) {
+ statusUpdater = new LocationStatusUpdater();
+ statusUpdater.schedule();
+ }
+ }
+
+ /*
+ * Rewrite of the connection and tunneling to the Hadoop location
+ */
+
+ /**
+ * Provides access to the default file system of this location.
+ *
+ * @return a {@link FileSystem}
+ */
+ public FileSystem getDFS() throws IOException {
+ return FileSystem.get(this.conf);
+ }
+
+ /**
+ * Provides access to the Job tracking system of this location
+ *
+ * @return a {@link JobClient}
+ */
+ public JobClient getJobClient() throws IOException {
+ JobConf jconf = new JobConf(this.conf);
+ return new JobClient(jconf);
+ }
+
+ /*
+ * Listeners handling
+ */
+
+ protected void fireJarPublishDone(IJarModule jar) {
+ for (IJobListener listener : jobListeners) {
+ listener.publishDone(jar);
+ }
+ }
+
+ protected void fireJarPublishStart(IJarModule jar) {
+ for (IJobListener listener : jobListeners) {
+ listener.publishStart(jar);
+ }
+ }
+
+ protected void fireJobAdded(HadoopJob job) {
+ for (IJobListener listener : jobListeners) {
+ listener.jobAdded(job);
+ }
+ }
+
+ protected void fireJobRemoved(IHadoopJob job) {
+ for (IJobListener listener : jobListeners) {
+ listener.jobRemoved(job);
+ }
+ }
+
+ protected void fireJobChanged(HadoopJob job) {
+ for (IJobListener listener : jobListeners) {
+ listener.jobChanged(job);
+ }
+ }
+
+ @Override
+ public void saveConfiguration(File confDir, String jarFilePath) throws IOException {
+ // Prepare the Hadoop configuration
+ JobConf conf = new JobConf(this.conf);
+ conf.setJar(jarFilePath);
+ // Write it to the disk file
+ File coreSiteFile = new File(confDir, "core-site.xml");
+ File yarnSiteFile = new File(confDir, "yarn-site.xml");
+ FileOutputStream fos = new FileOutputStream(coreSiteFile);
+ FileInputStream fis = null;
+ try {
+ conf.writeXml(fos);
+ fos.close();
+ fos = new FileOutputStream(yarnSiteFile);
+ fis = new FileInputStream(coreSiteFile);
+ IOUtils.copyBytes(new BufferedInputStream(fis), fos, 4096);
+ } finally {
+ IOUtils.closeStream(fos);
+ IOUtils.closeStream(fis);
+ }
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hdt.core.launch.AbstractHadoopCluster#isAvailable()
+ */
+ @Override
+ public boolean isAvailable() throws CoreException {
+ Callable<JobClient> task = new Callable<JobClient>() {
+ @Override
+ public JobClient call() throws Exception {
+ return getJobClient();
+ }
+ };
+ Future<JobClient> jobClientFuture = service.submit(task);
+ try {
+ jobClientFuture.get(500, TimeUnit.SECONDS);
+ return true;
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new CoreException(new Status(Status.ERROR,
+ Activator.BUNDLE_ID, "unable to connect to server", e));
+ }
+ }
+
+ @Override
+ public HadoopVersion getVersion() {
+ return HadoopVersion.Version2;
+ }
+
+
+ @Override
+ public HadoopConfigurationBuilder getUIConfigurationBuilder() {
+ return new HadoopV2ConfigurationBuilder(this);
+ }
+}
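isAvailable() above never calls getJobClient() on the caller's thread; it submits the call to a single-thread executor and bounds the wait with Future.get, so an unreachable ResourceManager cannot hang the UI indefinitely. The same guard is shown in isolation below; the class name and timeout handling are illustrative, not part of HDT.

    // Generic form of the timeout-guarded check used by isAvailable() above.
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    public class TimedCheck {
        private static final ExecutorService service = Executors.newSingleThreadExecutor();

        static boolean isAvailable(Callable<?> connect, long timeout, TimeUnit unit) {
            Future<?> future = service.submit(connect);
            try {
                future.get(timeout, unit); // throws TimeoutException when too slow
                return true;
            } catch (Exception e) {
                future.cancel(true); // abandon the hung connection attempt
                return false;
            }
        }
    }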
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java
new file mode 100644
index 0000000..a45086c
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopHomeReader.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.hadoop2.release;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+
+public class HadoopHomeReader extends AbstractHadoopHomeReader {
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.AbstractHadoopHomeReader#validateHadoopHome(java.
+ * io.File)
+ */
+ @Override
+ public boolean validateHadoopHome(File location) {
+ File hadoopBin = new File(location, "bin");
+ File hadoopSBIn = new File(location, "sbin");
+ FilenameFilter gotHadoopYarn = new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ return (name.indexOf("yarn") != -1);
+ }
+ };
+ return hadoopBin.exists() && (hadoopBin.list(gotHadoopYarn).length > 0)
+ && hadoopSBIn.exists() && (hadoopSBIn.list(gotHadoopYarn).length > 0);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hdt.core.AbstractHadoopHomeReader#getHadoopJars(java.io.File)
+ */
+ @Override
+ public List<File> getHadoopJars(File hadoopHome) {
+ File mrCommonHome = FileUtils.getFile(hadoopHome, "share","hadoop","common");
+ File mrCommonLib = FileUtils.getFile(mrCommonHome,"lib");
+ File hdfsHome = FileUtils.getFile(hadoopHome, "share","hadoop","hdfs");
+ File hdfsLib = FileUtils.getFile(hdfsHome,"lib");
+ File yarnHome = FileUtils.getFile(hadoopHome, "share","hadoop","yarn");
+ File yarnLib = FileUtils.getFile(yarnHome,"lib");
+ File mrHome = FileUtils.getFile(hadoopHome, "share","hadoop","mapreduce");
+ File mrLib = FileUtils.getFile(mrHome,"lib");
+
+ FilenameFilter jarFileFilter = new FilenameFilter() {
+ Set<String> selectedFileName= new HashSet<String>();
+ @Override
+ public boolean accept(File dir, String name) {
+ boolean accept = name.endsWith(".jar")
+ && !selectedFileName.contains(name);
+ if(accept){
+ selectedFileName.add(name);
+ }
+ return accept;
+ }
+ };
+ final ArrayList<File> coreJars = new ArrayList<File>();
+ coreJars.addAll(getJarFiles(mrCommonHome,jarFileFilter));
+ coreJars.addAll(getJarFiles(mrCommonLib,jarFileFilter));
+ coreJars.addAll(getJarFiles(hdfsHome,jarFileFilter));
+ coreJars.addAll(getJarFiles(hdfsLib,jarFileFilter));
+ coreJars.addAll(getJarFiles(yarnHome,jarFileFilter));
+ coreJars.addAll(getJarFiles(yarnLib,jarFileFilter));
+ coreJars.addAll(getJarFiles(mrHome,jarFileFilter));
+ coreJars.addAll(getJarFiles(mrLib,jarFileFilter));
+ return coreJars;
+ }
+
+ private ArrayList<File> getJarFiles(File hadoopHome, FilenameFilter jarFileFilter) {
+ final ArrayList<File> jars = new ArrayList<File>();
+ // File.list() returns null when the directory does not exist
+ String[] names = hadoopHome.list(jarFileFilter);
+ if (names != null) {
+ for (String hadoopCoreLibFileName : names) {
+ jars.add(new File(hadoopHome, hadoopCoreLibFileName));
+ }
+ }
+ return jars;
+ }
+
+}
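HadoopHomeReader collects every jar (deduplicated by file name) under share/hadoop/{common,hdfs,yarn,mapreduce} and their lib/ subdirectories. The sketch below turns that list into a launch classpath; the /opt/hadoop-2.2.0 path is a placeholder.

    // Usage sketch; the Hadoop home path is a placeholder.
    import java.io.File;
    import java.util.List;

    public class BuildClasspath {
        public static void main(String[] args) {
            HadoopHomeReader reader = new HadoopHomeReader();
            File home = new File("/opt/hadoop-2.2.0");
            if (!reader.validateHadoopHome(home)) {
                System.err.println("Not a Hadoop 2 home: " + home);
                return;
            }
            List<File> jars = reader.getHadoopJars(home);
            StringBuilder cp = new StringBuilder();
            for (File jar : jars) {
                if (cp.length() > 0)
                    cp.append(File.pathSeparatorChar);
                cp.append(jar.getAbsolutePath());
            }
            System.out.println(cp);
        }
    }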
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
new file mode 100644
index 0000000..a648cae
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopJob.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob implements IHadoopJob {
+
+ /**
+ * Enum representation of a Job state
+ */
+ public enum JobState {
+ PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+ final int state;
+
+ JobState(int state) {
+ this.state = state;
+ }
+
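+ // Maps Hadoop's integer job-state constants onto this enum; unknown states yield null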
+ static JobState ofInt(int state) {
+ if (state == JobStatus.PREP) {
+ return PREPARE;
+ } else if (state == JobStatus.RUNNING) {
+ return RUNNING;
+ } else if (state == JobStatus.FAILED) {
+ return FAILED;
+ } else if (state == JobStatus.SUCCEEDED) {
+ return SUCCEEDED;
+ } else {
+ return null;
+ }
+ }
+ }
+
+ /**
+ * Location this Job runs on
+ */
+ private final HadoopCluster location;
+
+ /**
+ * Unique identifier of this Job
+ */
+ final JobID jobId;
+
+ /**
+ * Status representation of a running job. This actually contains a
+ * reference to a JobClient. Its methods might block.
+ */
+ RunningJob running;
+
+ /**
+ * Last polled status
+ *
+ * @deprecated cached snapshot only; prefer querying the live {@link RunningJob} handle
+ */
+ JobStatus status;
+
+ /**
+ * Last polled counters
+ */
+ Counters counters;
+
+ /**
+ * Job Configuration
+ */
+ JobConf jobConf = null;
+
+ boolean completed = false;
+
+ boolean successful = false;
+
+ boolean killed = false;
+
+ int totalMaps;
+
+ int totalReduces;
+
+ int completedMaps;
+
+ int completedReduces;
+
+ float mapProgress;
+
+ float reduceProgress;
+
+ /**
+ * Constructor for a Hadoop job representation
+ *
+ * @param location
+ * @param id
+ * @param running
+ * @param status
+ */
+ public HadoopJob(HadoopCluster location, JobID id, RunningJob running, JobStatus status) {
+ this.location = location;
+ this.jobId = id;
+ this.running = running;
+ loadJobFile();
+ update(status);
+ }
+
+ /**
+ * Try to locate and load the JobConf file for this job so to get more
+ * details on the job (number of maps and of reduces)
+ */
+ private void loadJobFile() {
+ try {
+ String jobFile = getJobFile();
+ FileSystem fs = location.getDFS();
+ File tmp = File.createTempFile(getJobID().toString(), ".xml");
+ if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location.getConf())) {
+ this.jobConf = new JobConf(tmp.toString());
+
+ this.totalMaps = jobConf.getNumMapTasks();
+ this.totalReduces = jobConf.getNumReduceTasks();
+ }
+
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+ }
+
+ /* @inheritDoc */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+ result = prime * result + ((location == null) ? 0 : location.hashCode());
+ return result;
+ }
+
+ /* @inheritDoc */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (!(obj instanceof HadoopJob))
+ return false;
+ final HadoopJob other = (HadoopJob) obj;
+ if (jobId == null) {
+ if (other.jobId != null)
+ return false;
+ } else if (!jobId.equals(other.jobId))
+ return false;
+ if (location == null) {
+ if (other.location != null)
+ return false;
+ } else if (!location.equals(other.location))
+ return false;
+ return true;
+ }
+
+ /**
+ * Get the running status of the Job (see {@link JobStatus}).
+ *
+ * @return the display name of the current job state
+ */
+ public String getState() {
+ if (this.completed) {
+ if (this.successful) {
+ return JobState.SUCCEEDED.toString();
+ } else {
+ return JobState.FAILED.toString();
+ }
+ } else {
+ return JobState.RUNNING.toString();
+ }
+ }
+
+ /**
+ * @return the job identifier as a string
+ */
+ public String getJobID() {
+ return this.jobId.toString();
+ }
+
+ /**
+ * @return the cluster this job runs on
+ */
+ public AbstractHadoopCluster getLocation() {
+ return this.location;
+ }
+
+ /**
+ * @return whether the job has completed (successfully or not)
+ */
+ public boolean isCompleted() {
+ return this.completed;
+ }
+
+ /**
+ * @return the name of this job
+ */
+ public String getJobName() {
+ return this.running.getJobName();
+ }
+
+ /**
+ * @return the path of the job configuration file
+ */
+ public String getJobFile() {
+ return this.running.getJobFile();
+ }
+
+ /**
+ * Return the tracking URL for this Job.
+ *
+ * @return string representation of the tracking URL for this Job
+ */
+ public String getTrackingURL() {
+ return this.running.getTrackingURL();
+ }
+
+ /**
+ * Returns a string representation of this job status
+ *
+ * @return string representation of this job status
+ */
+ public String getStatus() {
+
+ StringBuilder s = new StringBuilder();
+
+ s.append("Maps : " + completedMaps + "/" + totalMaps);
+ s.append(" (" + mapProgress + ")");
+ s.append(" Reduces : " + completedReduces + "/" + totalReduces);
+ s.append(" (" + reduceProgress + ")");
+
+ return s.toString();
+ }
+
+ /**
+ * Update this job status according to the given JobStatus
+ *
+ * @param status
+ */
+ void update(JobStatus status) {
+ this.status = status;
+ try {
+ this.counters = running.getCounters();
+ this.completed = running.isComplete();
+ this.successful = running.isSuccessful();
+ this.mapProgress = running.mapProgress();
+ this.reduceProgress = running.reduceProgress();
+ // running.getTaskCompletionEvents(fromEvent);
+
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+
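+ // Derive completed task counts from the reported progress fractions (0.0 to 1.0)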
+ this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+ this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+ }
+
+ /**
+ * Print this job counters (for debugging purpose)
+ */
+ void printCounters() {
+ System.out.printf("New Job:\n", counters);
+ for (String groupName : counters.getGroupNames()) {
+ Counters.Group group = counters.getGroup(groupName);
+ System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+ for (Counters.Counter counter : group) {
+ System.out.printf("\t\t%s: %s\n", counter.getDisplayName(), counter.getCounter());
+ }
+ }
+ System.out.printf("\n");
+ }
+
+ /**
+ * Kill this job
+ */
+ public void kill() {
+ try {
+ this.running.killJob();
+ this.killed = true;
+
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Print this job status (for debugging purpose)
+ */
+ public void display() {
+ System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+ System.out.printf("Configuration file: %s\n", getJobID());
+ System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+ System.out.printf("Completion: map: %f reduce %f\n", 100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+ System.out.println("Job total maps = " + totalMaps);
+ System.out.println("Job completed maps = " + completedMaps);
+ System.out.println("Map percentage complete = " + mapProgress);
+ System.out.println("Job total reduces = " + totalReduces);
+ System.out.println("Job completed reduces = " + completedReduces);
+ System.out.println("Reduce percentage complete = " + reduceProgress);
+ System.out.flush();
+ }
+
+}
diff --git a/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java
new file mode 100644
index 0000000..02f05c4
--- /dev/null
+++ b/org.apache.hdt.hadoop2.release/src/org/apache/hdt/hadoop2/release/HadoopV2ConfigurationBuilder.java
@@ -0,0 +1,771 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.hadoop2.release;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder;
+import org.apache.hdt.core.launch.ConfProp;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
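+/**
+ * Builds the SWT controls for editing a Hadoop 2.x cluster location: a
+ * "General" tab for the Resource Manager, Job History, DFS master and SOCKS
+ * proxy endpoints, and an "Advanced parameters" tab exposing every
+ * configuration property.
+ */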
+class HadoopV2ConfigurationBuilder implements HadoopConfigurationBuilder {
+
+ private AbstractHadoopCluster location;
+ private TabMediator mediator;
+ private ChangeListener changeListener;
+
+ public HadoopV2ConfigurationBuilder(AbstractHadoopCluster location) {
+ this.location = location;
+ }
+
+ @Override
+ public void buildControl(Composite panel) {
+ mediator = new TabMediator(panel);
+ GridData gdata = new GridData(GridData.FILL_BOTH);
+ gdata.horizontalSpan = 2;
+ mediator.folder.setLayoutData(gdata);
+ }
+
+ private interface TabListener {
+ void notifyChange(ConfProp prop, String propValue);
+ }
+
+ private class TabMediator {
+ TabFolder folder;
+ private Set<TabListener> tabs = new HashSet<TabListener>();
+
+ TabMediator(Composite parent) {
+ folder = new TabFolder(parent, SWT.NONE);
+ tabs.add(new TabMain(this));
+ tabs.add(new TabAdvanced(this));
+ }
+
+ /**
+ * Implements change notifications from any tab: update the
+ * location state and other tabs
+ *
+ * @param source
+ * origin of the notification (one of the tabs)
+ * @param prop
+ * modified property
+ * @param propValue
+ * new value
+ */
+ void notifyChange(TabListener source, final ConfProp prop, final String propValue) {
+ // Ignore notification when no change
+ String oldValue = location.getConfPropValue(prop);
+ if ((oldValue != null) && oldValue.equals(propValue))
+ return;
+
+ location.setConfPropValue(prop, propValue);
+ changeListener.notifyChange(prop, propValue);
+
+ this.fireChange(source, prop, propValue);
+
+ /*
+ * Now we deal with dependencies between settings
+ */
+ final String rmHost = location.getConfPropValue(ConfProp.PI_RESOURCE_MGR_HOST);
+ final String rmPort = location.getConfPropValue(ConfProp.PI_RESOURCE_MGR_PORT);
+ final String jhHost = location.getConfPropValue(ConfProp.PI_JOB_HISTORY_HOST);
+ final String jhPort = location.getConfPropValue(ConfProp.PI_JOB_HISTORY_PORT);
+ final String nameNodeHost = location.getConfPropValue(ConfProp.PI_NAME_NODE_HOST);
+ final String nameNodePort = location.getConfPropValue(ConfProp.PI_NAME_NODE_PORT);
+ final boolean colocate = location.getConfPropValue(ConfProp.PI_COLOCATE_MASTERS).equalsIgnoreCase("yes");
+ final String rmDefaultURI = location.getConfPropValue(ConfProp.RM_DEFAULT_URI);
+ final String jhDefaultURI = location.getConfPropValue(ConfProp.JOB_HISTORY_DEFAULT_URI);
+ final String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+ final String socksServerURI = location.getConfPropValue(ConfProp.SOCKS_SERVER);
+ final boolean socksProxyEnable = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_ENABLE).equalsIgnoreCase("yes");
+ final String socksProxyHost = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_HOST);
+ final String socksProxyPort = location.getConfPropValue(ConfProp.PI_SOCKS_PROXY_PORT);
+
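+ // Propagate derived values on the SWT thread; the nested notifyChange
+ // calls re-enter this method once for each dependent property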
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ switch (prop) {
+ case PI_RESOURCE_MGR_HOST: {
+ if (colocate) {
+ notifyChange(null, ConfProp.PI_NAME_NODE_HOST, rmHost);
+ notifyChange(null, ConfProp.PI_JOB_HISTORY_HOST, rmHost);
+ }
+ String newJobTrackerURI = String.format("%s:%s", rmHost, rmPort);
+ notifyChange(null, ConfProp.RM_DEFAULT_URI, newJobTrackerURI);
+ break;
+ }
+ case PI_RESOURCE_MGR_PORT: {
+ String newJobTrackerURI = String.format("%s:%s", rmHost, rmPort);
+ notifyChange(null, ConfProp.RM_DEFAULT_URI, newJobTrackerURI);
+ break;
+ }
+ case PI_NAME_NODE_HOST: {
+ String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+ notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+ // Break colocation if someone forces the DFS master host
+ if (!colocate && !nameNodeHost.equals(rmHost))
+ notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+ break;
+ }
+ case PI_NAME_NODE_PORT: {
+ String newHDFSURI = String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+ notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+ break;
+ }
+
+ case PI_JOB_HISTORY_HOST: {
+ String newJobHistoryURI = String.format("%s:%s", jhHost, jhPort);
+ notifyChange(null, ConfProp.JOB_HISTORY_DEFAULT_URI, newJobHistoryURI);
+
+ // Break colocation if someone forces the Job History host
+ if (!colocate && !jhHost.equals(rmHost))
+ notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+ break;
+ }
+ case PI_JOB_HISTORY_PORT: {
+ String newJobHistoryURI = String.format("%s:%s", jhHost, jhPort);
+ notifyChange(null, ConfProp.JOB_HISTORY_DEFAULT_URI, newJobHistoryURI);
+ break;
+ }
+
+ case PI_SOCKS_PROXY_HOST: {
+ String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+ notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+ break;
+ }
+ case PI_SOCKS_PROXY_PORT: {
+ String newSocksProxyURI = String.format("%s:%s", socksProxyHost, socksProxyPort);
+ notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+ break;
+ }
+ case RM_DEFAULT_URI: {
+ String[] strs = rmDefaultURI.split(":", 2);
+ String host = strs[0];
+ String port = (strs.length == 2) ? strs[1] : "";
+ notifyChange(null, ConfProp.PI_RESOURCE_MGR_HOST, host);
+ notifyChange(null, ConfProp.PI_RESOURCE_MGR_PORT, port);
+ break;
+ }
+ case JOB_HISTORY_DEFAULT_URI: {
+ String[] strs = jhDefaultURI.split(":", 2);
+ String host = strs[0];
+ String port = (strs.length == 2) ? strs[1] : "";
+ notifyChange(null, ConfProp.PI_JOB_HISTORY_HOST, host);
+ notifyChange(null, ConfProp.PI_JOB_HISTORY_PORT, port);
+ break;
+ }
+ case FS_DEFAULT_URI: {
+ try {
+ URI uri = new URI(fsDefaultURI);
+ if (uri.getScheme().equals("hdfs")) {
+ String host = uri.getHost();
+ String port = Integer.toString(uri.getPort());
+ notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+ notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+ }
+ } catch (URISyntaxException use) {
+ // Ignore the update!
+ }
+ break;
+ }
+ case SOCKS_SERVER: {
+ String[] strs = socksServerURI.split(":", 2);
+ String host = strs[0];
+ String port = (strs.length == 2) ? strs[1] : "";
+ notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+ notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+ break;
+ }
+ case PI_COLOCATE_MASTERS: {
+ if (colocate) {
+ notifyChange(null, ConfProp.PI_NAME_NODE_HOST, rmHost);
+ notifyChange(null, ConfProp.PI_JOB_HISTORY_HOST, rmHost);
+ }
+ break;
+ }
+ case PI_SOCKS_PROXY_ENABLE: {
+ if (socksProxyEnable) {
+ notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.SocksSocketFactory");
+ } else {
+ notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT, "org.apache.hadoop.net.StandardSocketFactory");
+ }
+ break;
+ }
+ }
+ }
+ });
+
+ }
+
+ /**
+ * Change notifications on properties (by name). A property might
+ * not be reflected as a ConfProp enum. If it is, the notification
+ * is forwarded to the ConfProp notifyChange method. If not, it is
+ * processed here.
+ *
+ * @param source
+ * @param propName
+ * @param propValue
+ */
+ void notifyChange(TabListener source, String propName, String propValue) {
+ ConfProp prop = location.getConfPropForName(propName);
+ if (prop != null)
+ notifyChange(source, prop, propValue);
+ else
+ location.setConfPropValue(propName, propValue);
+ }
+
+ /**
+ * Broadcast a property change to all registered tabs. If a tab is
+ * identified as the source of the change, this tab will not be
+ * notified.
+ *
+ * @param source
+ * TODO
+ * @param prop
+ * @param value
+ */
+ private void fireChange(TabListener source, ConfProp prop, String value) {
+ for (TabListener tab : tabs) {
+ if (tab != source)
+ tab.notifyChange(prop, value);
+ }
+ }
+
+ }
+
+ /**
+ * Create a SWT Text component for the given {@link ConfProp} text
+ * configuration property.
+ *
+ * @param listener
+ * @param parent
+ * @param prop
+ * @return
+ */
+ private Text createConfText(ModifyListener listener, Composite parent, ConfProp prop) {
+ Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+ GridData data = new GridData(GridData.FILL_HORIZONTAL);
+ text.setLayoutData(data);
+ text.setData("hProp", prop);
+ text.setText(location.getConfPropValue(prop));
+ text.addModifyListener(listener);
+ return text;
+ }
+
+ /**
+ * Create a SWT Checked Button component for the given {@link ConfProp}
+ * boolean configuration property.
+ *
+ * @param listener
+ * @param parent
+ * @param prop
+ * @return
+ */
+ private Button createConfCheckButton(SelectionListener listener, Composite parent, ConfProp prop, String text) {
+ Button button = new Button(parent, SWT.CHECK);
+ button.setText(text);
+ button.setData("hProp", prop);
+ button.setSelection(location.getConfPropValue(prop).equalsIgnoreCase("yes"));
+ button.addSelectionListener(listener);
+ return button;
+ }
+
+ /**
+ * Create editor entry for the given configuration property. The editor
+ * is a couple (Label, Text).
+ *
+ * @param listener
+ * the listener to trigger on property change
+ * @param parent
+ * the SWT parent container
+ * @param prop
+ * the property to create an editor for
+ * @param labelText
+ * a label (null defaults to the property name)
+ *
+ * @return a SWT Text field
+ */
+ private Text createConfLabelText(ModifyListener listener, Composite parent, ConfProp prop, String labelText) {
+ Label label = new Label(parent, SWT.NONE);
+ if (labelText == null)
+ labelText = location.getConfPropName(prop);
+ label.setText(labelText);
+ return createConfText(listener, parent, prop);
+ }
+
+ /**
+ * Create an editor entry for the given configuration name
+ *
+ * @param listener
+ * the listener to trigger on property change
+ * @param parent
+ * the SWT parent container
+ * @param propName
+ * the name of the property to create an editor for
+ * @param labelText
+ * a label (null defaults to the property name)
+ *
+ * @return a SWT Text field
+ */
+ private Text createConfNameEditor(ModifyListener listener, Composite parent, String propName, String labelText) {
+
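+ // Use the typed ConfProp editor when the name maps to a known property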
+ {
+ ConfProp prop = location.getConfPropForName(propName);
+ if (prop != null)
+ return createConfLabelText(listener, parent, prop, labelText);
+ }
+
+ Label label = new Label(parent, SWT.NONE);
+ if (labelText == null)
+ labelText = propName;
+ label.setText(labelText);
+
+ Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+ GridData data = new GridData(GridData.FILL_HORIZONTAL);
+ text.setLayoutData(data);
+ text.setData("hPropName", propName);
+ text.setText(location.getConfPropValue(propName));
+ text.addModifyListener(listener);
+
+ return text;
+ }
+
+ /**
+ * Main parameters of the Hadoop location: <li>host and port of the
+ * Resource Manager <li>host and port of the Job History server <li>host
+ * and port of the DFS master (Name node) <li>SOCKS proxy
+ */
+ private class TabMain implements TabListener, ModifyListener, SelectionListener {
+
+ TabMediator mediator;
+
+ Text textRMHost;
+
+ Text textNNHost;
+
+ Button colocateMasters;
+
+ Text textRMPort;
+
+ Text textNNPort;
+
+ Text userName;
+
+ Button useSocksProxy;
+
+ Text socksProxyHost;
+
+ Text socksProxyPort;
+
+ private Group groupMR;
+
+ private Text textJHHost;
+
+ private Text textJHPort;
+
+ TabMain(TabMediator mediator) {
+ this.mediator = mediator;
+ TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+ tab.setText("General");
+ tab.setToolTipText("General location parameters");
+ tab.setControl(createControl(mediator.folder));
+ }
+
+ private Control createControl(Composite parent) {
+
+ Composite panel = new Composite(parent, SWT.FILL);
+ panel.setLayout(new GridLayout(2, false));
+
+ GridData data;
+
+ /*
+ * Map/Reduce group
+ */
+ {
+ groupMR = new Group(panel, SWT.SHADOW_NONE);
+ groupMR.setText("Resource Manager Node");
+ groupMR.setToolTipText("Address of the Resource Manager node.");
+ GridLayout layout = new GridLayout(2, false);
+ groupMR.setLayout(layout);
+ data = new GridData();
+ data.verticalAlignment = SWT.FILL;
+ data.horizontalAlignment = SWT.CENTER;
+ data.widthHint = 250;
+ groupMR.setLayoutData(data);
+
+ // Resource Manager host
+ Label label = new Label(groupMR, SWT.NONE);
+ label.setText("Host:");
+ data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+ label.setLayoutData(data);
+
+ textRMHost = createConfText(this, groupMR, ConfProp.PI_RESOURCE_MGR_HOST);
+ data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+ textRMHost.setLayoutData(data);
+
+ colocateMasters = createConfCheckButton(this, groupMR, ConfProp.PI_COLOCATE_MASTERS, "Use RM host for other services.");
+ data = new GridData();
+ data.horizontalSpan = 2;
+ colocateMasters.setLayoutData(data);
+
+ // Resource Manager port
+ label = new Label(groupMR, SWT.NONE);
+ label.setText("Port:");
+ data = new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+ label.setLayoutData(data);
+
+ textRMPort = createConfText(this, groupMR, ConfProp.PI_RESOURCE_MGR_PORT);
+ data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+ textRMPort.setLayoutData(data);
+ }
+
+ /*
+ * Job history Server
+ */
+ {
+ Group groupJH = new Group(panel, SWT.SHADOW_NONE);
+ groupJH.setText("Job History Node");
+ groupJH.setToolTipText("Address of the Job History node.");
+ GridLayout layout = new GridLayout(2, false);
+ groupJH.setLayout(layout);
+ data = new GridData();
+ data.horizontalAlignment = SWT.CENTER;
+ data.verticalAlignment = SWT.FILL;
+ data.widthHint = 250;
+ groupJH.setLayoutData(data);
+
+ // Job History host
+ Label label = new Label(groupJH, SWT.NONE);
+ data = new GridData();
+ label.setText("Host:");
+ label.setLayoutData(data);
+
+ textJHHost = createConfText(this, groupJH, ConfProp.PI_JOB_HISTORY_HOST);
+
+ // Job History port
+ label = new Label(groupJH, SWT.NONE);
+ data = new GridData();
+ label.setText("Port:");
+ label.setLayoutData(data);
+
+ textJHPort = createConfText(this, groupJH, ConfProp.PI_JOB_HISTORY_PORT);
+ }
+
+ {
+ Composite subpanel = new Composite(panel, SWT.FILL);
+ subpanel.setLayout(new GridLayout(2, false));
+ data = new GridData();
+ data.horizontalSpan = 2;
+ data.horizontalAlignment = SWT.FILL;
+ subpanel.setLayoutData(data);
+
+ userName = createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME, "&User name:");
+ }
+
+
+ /*
+ * DFS group
+ */
+ {
+ Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+ groupDFS.setText("DFS Master");
+ groupDFS.setToolTipText("Address of the Distributed FileSystem " + "master node (the Name Node).");
+ GridLayout layout = new GridLayout(2, false);
+ groupDFS.setLayout(layout);
+ data = new GridData();
+ data.horizontalAlignment = SWT.CENTER;
+ data.verticalAlignment = SWT.FILL;
+ data.widthHint = 250;
+ groupDFS.setLayoutData(data);
+
+ // Name Node host
+ Label label = new Label(groupDFS, SWT.NONE);
+ data = new GridData();
+ label.setText("Host:");
+ label.setLayoutData(data);
+
+ textNNHost = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+ // Name Node port
+ label = new Label(groupDFS, SWT.NONE);
+ data = new GridData();
+ label.setText("Port:");
+ label.setLayoutData(data);
+
+ textNNPort = createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+ }
+
+ // SOCKS proxy group
+ {
+ Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+ groupSOCKS.setText("SOCKS proxy");
+ groupSOCKS.setToolTipText("Address of the SOCKS proxy to use " + "to connect to the infrastructure.");
+ GridLayout layout = new GridLayout(2, false);
+ groupSOCKS.setLayout(layout);
+ data = new GridData();
+ data.horizontalAlignment = SWT.CENTER;
+ data.widthHint = 250;
+ groupSOCKS.setLayoutData(data);
+
+ useSocksProxy = createConfCheckButton(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+ data = new GridData();
+ data.horizontalSpan = 2;
+ useSocksProxy.setLayoutData(data);
+
+ // SOCKS proxy host
+ Label label = new Label(groupSOCKS, SWT.NONE);
+ data = new GridData();
+ label.setText("Host:");
+ label.setLayoutData(data);
+
+ socksProxyHost = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+ // SOCKS proxy port
+ label = new Label(groupSOCKS, SWT.NONE);
+ data = new GridData();
+ label.setText("Port:");
+ label.setLayoutData(data);
+
+ socksProxyPort = createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+ }
+
+ // Update the state of all widgets according to the current
+ // values!
+ reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+ reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+ reloadConfProp(ConfProp.PI_HADOOP_VERSION);
+
+ return panel;
+ }
+
+ /**
+ * Reload the given configuration property value
+ *
+ * @param prop
+ */
+ private void reloadConfProp(ConfProp prop) {
+ this.notifyChange(prop, location.getConfPropValue(prop));
+ }
+
+ public void notifyChange(ConfProp prop, String propValue) {
+ switch (prop) {
+ case PI_RESOURCE_MGR_HOST: {
+ textRMHost.setText(propValue);
+ break;
+ }
+ case PI_RESOURCE_MGR_PORT: {
+ textRMPort.setText(propValue);
+ break;
+ }
+ case PI_USER_NAME: {
+ userName.setText(propValue);
+ break;
+ }
+ case PI_COLOCATE_MASTERS: {
+ if (colocateMasters != null) {
+ boolean colocate = propValue.equalsIgnoreCase("yes");
+ colocateMasters.setSelection(colocate);
+ if (textNNHost != null) {
+ textNNHost.setEnabled(!colocate);
+ }
+ if (textJHHost != null) {
+ textJHHost.setEnabled(!colocate);
+ }
+ }
+ break;
+ }
+ case PI_NAME_NODE_HOST: {
+ textNNHost.setText(propValue);
+ break;
+ }
+ case PI_NAME_NODE_PORT: {
+ textNNPort.setText(propValue);
+ break;
+ }
+ case PI_JOB_HISTORY_HOST: {
+ textJHHost.setText(propValue);
+ break;
+ }
+ case PI_JOB_HISTORY_PORT: {
+ textJHPort.setText(propValue);
+ break;
+ }
+
+ case PI_SOCKS_PROXY_ENABLE: {
+ if (useSocksProxy != null) {
+ boolean useProxy = propValue.equalsIgnoreCase("yes");
+ useSocksProxy.setSelection(useProxy);
+ if (socksProxyHost != null)
+ socksProxyHost.setEnabled(useProxy);
+ if (socksProxyPort != null)
+ socksProxyPort.setEnabled(useProxy);
+ }
+ break;
+ }
+ case PI_SOCKS_PROXY_HOST: {
+ socksProxyHost.setText(propValue);
+ break;
+ }
+ case PI_SOCKS_PROXY_PORT: {
+ socksProxyPort.setText(propValue);
+ break;
+ }
+ }
+ }
+
+ /* @inheritDoc */
+ public void modifyText(ModifyEvent e) {
+ final Text text = (Text) e.widget;
+ final ConfProp prop = (ConfProp) text.getData("hProp");
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ mediator.notifyChange(TabMain.this, prop, text.getText());
+ }
+ });
+ }
+
+ /* @inheritDoc */
+ public void widgetDefaultSelected(SelectionEvent e) {
+ this.widgetSelected(e);
+ }
+
+ /* @inheritDoc */
+ public void widgetSelected(SelectionEvent e) {
+ final Button button = (Button) e.widget;
+ final ConfProp prop = (ConfProp) button.getData("hProp");
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ // We want to receive the update also!
+ mediator.notifyChange(null, prop, button.getSelection() ? "yes" : "no");
+ }
+ });
+ }
+
+ }
+
+ private class TabAdvanced implements TabListener, ModifyListener {
+ TabMediator mediator;
+ private Composite panel;
+ private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+ TabAdvanced(TabMediator mediator) {
+ this.mediator = mediator;
+ TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+ tab.setText("Advanced parameters");
+ tab.setToolTipText("Access to advanced Hadoop parameters");
+ tab.setControl(createControl(mediator.folder));
+
+ }
+
+ private Control createControl(Composite parent) {
+ ScrolledComposite sc = new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL);
+ panel = buildPanel(sc);
+ sc.setContent(panel);
+ sc.setExpandHorizontal(true);
+ sc.setExpandVertical(true);
+ sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+ return sc;
+ }
+
+ private Composite buildPanel(Composite parent) {
+ Composite panel = new Composite(parent, SWT.NONE);
+ GridLayout layout = new GridLayout();
+ layout.numColumns = 2;
+ layout.makeColumnsEqualWidth = false;
+ panel.setLayout(layout);
+ panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true, true, 1, 1));
+
+ // Sort by property name
+ SortedMap<String, String> map = new TreeMap<String, String>();
+ Iterator<Entry<String, String>> it = location.getConfiguration();
+ while (it.hasNext()) {
+ Entry<String, String> entry = it.next();
+ map.put(entry.getKey(), entry.getValue());
+ }
+
+ for (Entry<String, String> entry : map.entrySet()) {
+ Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+ textMap.put(entry.getKey(), text);
+ }
+ return panel;
+ }
+
+ public void notifyChange(ConfProp prop, final String propValue) {
+ Text text = textMap.get(location.getConfPropName(prop));
+ text.setText(propValue);
+ }
+
+ public void modifyText(ModifyEvent e) {
+ final Text text = (Text) e.widget;
+ Object hProp = text.getData("hProp");
+ final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+ Object hPropName = text.getData("hPropName");
+ final String propName = (hPropName != null) ? (String) hPropName : null;
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ if (prop != null)
+ mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+ else
+ mediator.notifyChange(TabAdvanced.this, propName, text.getText());
+ }
+ });
+ }
+
+ }
+
+ @Override
+ public void notifyChange(ConfProp confProp, String text) {
+ mediator.notifyChange(null, ConfProp.PI_LOCATION_NAME, text);
+ }
+
+ @Override
+ public void setChangeListener(ChangeListener l) {
+ changeListener = l;
+ }
+
+}
\ No newline at end of file
diff --git a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
index baa7145..01bbaee 100644
--- a/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui.test/META-INF/MANIFEST.MF
@@ -2,7 +2,7 @@
Bundle-ManifestVersion: 2
Bundle-Name: Apache Hadoop UI Test Eclipse Plugin
Bundle-SymbolicName: org.apache.hdt.ui.test;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.incubating
Bundle-Activator: org.apache.hdt.ui.test.Activator
Bundle-Vendor: Apache Hadoop
Require-Bundle: org.eclipse.ui,
diff --git a/org.apache.hdt.ui.test/pom.xml b/org.apache.hdt.ui.test/pom.xml
index 1cbb8d0..5b10a29 100644
--- a/org.apache.hdt.ui.test/pom.xml
+++ b/org.apache.hdt.ui.test/pom.xml
@@ -24,7 +24,7 @@
<relativePath>../pom.xml</relativePath>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.ui.test</artifactId>
@@ -37,6 +37,7 @@
<plugin>
<groupId>org.eclipse.tycho</groupId>
<artifactId>tycho-surefire-plugin</artifactId>
+ <version>${tycho-version}</version>
</plugin>
</plugins>
</build>
diff --git a/org.apache.hdt.ui/META-INF/MANIFEST.MF b/org.apache.hdt.ui/META-INF/MANIFEST.MF
index 3825bf1..86b811a 100644
--- a/org.apache.hdt.ui/META-INF/MANIFEST.MF
+++ b/org.apache.hdt.ui/META-INF/MANIFEST.MF
@@ -2,17 +2,22 @@
Bundle-ManifestVersion: 2
Bundle-Name: Apache Hadoop UI Eclipse Plugin
Bundle-SymbolicName: org.apache.hdt.ui;singleton:=true
-Bundle-Version: 0.0.1.incubating
+Bundle-Version: 0.0.2.incubating
Bundle-Activator: org.apache.hdt.ui.Activator
Bundle-Vendor: Apache Hadoop
Require-Bundle: org.eclipse.core.runtime,
org.eclipse.core.resources,
org.eclipse.ui,
+ org.eclipse.jdt.core,
+ org.eclipse.jdt.ui,
org.eclipse.ui.ide;bundle-version="3.6.0",
org.eclipse.team.ui;bundle-version="3.5.100",
org.eclipse.ui.navigator;bundle-version="3.5.0",
org.eclipse.ui.navigator.resources;bundle-version="3.4.200",
org.eclipse.ui.views.properties.tabbed;bundle-version="3.5.100";resolution:=optional,
+ org.eclipse.jdt.debug.ui,
+ org.eclipse.jdt.launching,
+ org.eclipse.debug.ui,
org.apache.hdt.core
Bundle-RequiredExecutionEnvironment: JavaSE-1.6
Bundle-ActivationPolicy: lazy
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index e6f1e53..7bc1a36 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -17,6 +17,14 @@
limitations under the License.
-->
<plugin>
+ <extension
+ point="org.eclipse.ui.preferencePages">
+ <page
+ class="org.apache.hdt.ui.preferences.MapReducePreferencePage"
+ id="org.apache.hdt.ui.preferences.MapReducePreferencePage"
+ name="Hadoop">
+ </page>
+ </extension>
<extension
point="org.eclipse.ui.perspectives">
<perspective
@@ -31,26 +39,34 @@
<perspectiveExtension
targetID="org.apache.hdt.ui.perspective">
<view
- id="org.apache.hdt.ui.view.servers"
+ id="org.eclipse.ui.navigator.ProjectExplorer"
minimized="false"
ratio="0.25"
relationship="left"
relative="org.eclipse.ui.editorss">
</view>
<view
- id="org.eclipse.ui.navigator.ProjectExplorer"
+ id="org.apache.hdt.ui.view.servers"
minimized="false"
ratio="0.5"
relationship="bottom"
- relative="org.apache.hdt.ui.view.servers">
+ relative="org.eclipse.ui.navigator.ProjectExplorer">
</view>
<view
- id="org.eclipse.ui.views.PropertySheet"
+ id="org.apache.hdt.ui.ClusterView"
minimized="false"
ratio="0.66"
relationship="bottom"
relative="org.eclipse.ui.editorss">
</view>
+ <view
+ id="org.eclipse.ui.views.PropertySheet"
+ minimized="false"
+ relationship="stack"
+ relative="org.apache.hdt.ui.ClusterView">
+ </view>
+ <newWizardShortcut
+ id="org.apache.hdt.ui.wizard.newProjectWizard"/>
<newWizardShortcut
id="org.apache.hdt.ui.wizard.newHdfsServer">
</newWizardShortcut>
@@ -146,6 +162,14 @@
</extension>
<extension
point="org.eclipse.ui.newWizards">
+ <wizard
+ category="org.apache.hdt.ui.newWizards.category"
+ class="org.apache.hdt.ui.internal.mr.NewMapReduceProjectWizard"
+ finalPerspective="org.apache.hdt.ui.perspective"
+ icon="icons/hadoop-logo-16x16.png"
+ id="org.apache.hdt.ui.wizard.newProjectWizard"
+ name="Map/Reduce Project"
+ project="true"/>
<wizard
category="org.apache.hdt.ui.newWizards.category"
class="org.apache.hdt.ui.internal.hdfs.NewHDFSWizard"
@@ -154,10 +178,6 @@
id="org.apache.hdt.ui.wizard.newHdfsServer"
name="New HDFS Server">
</wizard>
- <category
- id="org.apache.hdt.ui.newWizards.category"
- name="Hadoop">
- </category>
<wizard
category="org.apache.hdt.ui.newWizards.category"
class="org.apache.hdt.ui.internal.zookeeper.NewZooKeeperWizard"
@@ -166,6 +186,44 @@
id="org.apache.hdt.ui.wizard.newZooKeeperServer"
name="New ZooKeeper Server">
</wizard>
+ <wizard
+ category="org.apache.hdt.ui.newWizards.category"
+ class="org.apache.hdt.ui.internal.mr.NewLocationWizard"
+ finalPerspective="org.apache.hdt.ui.perspective"
+ icon="icons/location-new-16x16.png"
+ id="org.apache.hdt.ui.wizard.newMRCluster"
+ name="New MR Cluster">
+ </wizard>
+ <wizard category="org.apache.hdt.ui.newWizards.category"
+ class="org.apache.hdt.ui.internal.mr.NewMapperWizard"
+ icon="icons/mapper16.png"
+ id="org.apache.hdt.ui.wizard.NewMapperWizard"
+ name="Mapper"
+ project="false"/>
+ <wizard category="org.apache.hdt.ui.newWizards.category"
+ class="org.apache.hdt.ui.internal.mr.NewReducerWizard"
+ icon="icons/reducer16.png"
+ id="org.apache.hdt.ui.wizard.NewReducerWizard"
+ name="Reducer"
+ project="false"/>
+ <wizard
+ category="org.apache.hdt.ui.newWizards.category"
+ class="org.apache.hdt.ui.internal.mr.NewDriverWizard"
+ icon="icons/driver.png"
+ id="org.apache.hdt.ui.wizard.NewDriverWizard"
+ name="MapReduce Driver"
+ project="false"/>
+ <wizard
+ category="org.apache.hdt.ui.newWizards.category"
+ class="org.apache.hdt.ui.internal.mr.NewPartitionerWizard"
+ icon="icons/Elephant16x16.gif"
+ id="org.apache.hdt.ui.wizard.NewPartitionerWizard"
+ name="Partitioner"
+ project="false"/>
+ <category
+ id="org.apache.hdt.ui.newWizards.category"
+ name="Hadoop">
+ </category>
</extension>
<extension
point="org.eclipse.ui.popupMenus">
@@ -381,19 +439,28 @@
</extension>
<extension
point="org.eclipse.ui.views">
+ <category
+ id="org.apache.hdt.ui.category"
+ name="Hadoop">
+ </category>
<view
allowMultiple="false"
category="org.apache.hdt.ui.category"
class="org.eclipse.ui.navigator.CommonNavigator"
icon="icons/hadoop-logo-16x16.png"
id="org.apache.hdt.ui.view.servers"
- name="Hadoop Servers"
+ name="ZooKeeper Servers"
restorable="true">
</view>
- <category
- id="org.apache.hdt.ui.category"
- name="Hadoop">
- </category>
+ <view
+ allowMultiple="false"
+ category="org.apache.hdt.ui.category"
+ class="org.apache.hdt.ui.internal.mr.ClusterView"
+ icon="icons/hadoop-logo-16x16.png"
+ id="org.apache.hdt.ui.ClusterView"
+ name="Hadoop Clusters"
+ restorable="true">
+ </view>
</extension>
<extension
point="org.eclipse.ui.actionSets">
@@ -420,5 +487,32 @@
</action>
</actionSet>
</extension>
-
+ <extension
+ point="org.eclipse.debug.ui.launchShortcuts">
+ <shortcut
+ class="org.apache.hdt.ui.internal.launch.HadoopApplicationLaunchShortcut"
+ icon="icons/elephantblue16x16.gif"
+ id="org.apache.hdt.launch.shortcut"
+ label="Run on Hadoop"
+ modes="run">
+ <contextualLaunch>
+ <contextLabel mode="run" label="Run on Hadoop" />
+ <enablement>
+ <with variable="selection">
+ <count value="1"/>
+ <iterate>
+ <or>
+ <test property="org.eclipse.jdt.launching.hasMain"/>
+ <and>
+ <test property="org.eclipse.jdt.launching.isContainer"/>
+ <test property="org.eclipse.jdt.launching.hasProjectNature" args="org.eclipse.jdt.core.javanature"/>
+ <test property="org.eclipse.jdt.launching.hasProjectNature" args="org.apache.hdt.mrature"/>
+ </and>
+ </or>
+ </iterate>
+ </with>
+ </enablement>
+ </contextualLaunch>
+ </shortcut>
+ </extension>
</plugin>
diff --git a/org.apache.hdt.ui/pom.xml b/org.apache.hdt.ui/pom.xml
index f275c81..0df6a60 100644
--- a/org.apache.hdt.ui/pom.xml
+++ b/org.apache.hdt.ui/pom.xml
@@ -22,7 +22,7 @@
<relativePath>../pom.xml</relativePath>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.ui</artifactId>
<packaging>eclipse-plugin</packaging>
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
new file mode 100644
index 0000000..b4017cd
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/ImageLibrary.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui;
+
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.ui.ISharedImages;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.plugin.AbstractUIPlugin;
+import org.osgi.framework.Bundle;
+
+/**
+ * Icons manager
+ */
+public class ImageLibrary {
+
+ private final Bundle bundle = Activator.getDefault().getBundle();
+
+ /**
+ * Singleton instance
+ */
+ private static volatile ImageLibrary instance = null;
+
+ private ISharedImages sharedImages = PlatformUI.getWorkbench().getSharedImages();
+
+ /**
+ * Where resources (icons, images...) are available in the Bundle
+ */
+ private static final String RESOURCE_DIR = "icons/";
+
+ /**
+ * Public access to image descriptors
+ *
+ * @param name
+ * @return the image descriptor
+ */
+ public static ImageDescriptor get(String name) {
+ return getInstance().getImageDescriptorByName(name);
+ }
+
+ /**
+ * Public access to images
+ *
+ * @param name
+ * @return the image
+ */
+ public static Image getImage(String name) {
+ return getInstance().getImageByName(name);
+ }
+
+ /**
+ * Singleton access
+ *
+ * @return the Image library
+ */
+ public static ImageLibrary getInstance() {
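+ // Double-checked locking: safe here because the instance field is volatile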
+ if (instance == null) {
+ synchronized (ImageLibrary.class) {
+ if (instance == null)
+ instance = new ImageLibrary();
+ }
+ }
+ return instance;
+ }
+
+ /**
+ * Map of registered resources (ImageDescriptor and Image)
+ */
+ private Map<String, ImageDescriptor> descMap = new HashMap<String, ImageDescriptor>();
+
+ private Map<String, Image> imageMap = new HashMap<String, Image>();
+
+ /**
+ * Image library constructor: put image definitions here.
+ */
+ private ImageLibrary() {
+ /*
+ * Servers view
+ */
+ newImage("server.view.location.entry", "Elephant-24x24.png");
+ newImage("server.view.job.entry", "job.gif");
+ newImage("server.view.action.location.new", "location-new-16x16.png");
+ newImage("server.view.action.location.edit", "location-edit-16x16.png");
+ newSharedImage("server.view.action.delete", ISharedImages.IMG_TOOL_DELETE);
+
+ /*
+ * DFS Browser
+ */
+ newImage("dfs.browser.root.entry", "files.gif");
+ newImage("dfs.browser.location.entry", "Elephant-16x16.png");
+ newSharedImage("dfs.browser.folder.entry", ISharedImages.IMG_OBJ_FOLDER);
+ newSharedImage("dfs.browser.file.entry", ISharedImages.IMG_OBJ_FILE);
+ // DFS files in editor
+ newSharedImage("dfs.file.editor", ISharedImages.IMG_OBJ_FILE);
+ // Actions
+ newImage("dfs.browser.action.mkdir", "new-folder.png");
+ newImage("dfs.browser.action.download", "download.png");
+ newImage("dfs.browser.action.upload_files", "upload.png");
+ newImage("dfs.browser.action.upload_dir", "upload.png");
+ newSharedImage("dfs.browser.action.delete", ISharedImages.IMG_TOOL_DELETE);
+ newImage("dfs.browser.action.refresh", "refresh.png");
+
+ /*
+ * Wizards
+ */
+ newImage("wizard.mapper.new", "mapwiz.png");
+ newImage("wizard.reducer.new", "reducewiz.png");
+ newImage("wizard.driver.new", "driverwiz.png");
+ newImage("wizard.mapreduce.project.new", "projwiz.png");
+ }
+
+ /**
+ * Accessor to images
+ *
+ * @param name
+ * @return
+ */
+ private ImageDescriptor getImageDescriptorByName(String name) {
+ return this.descMap.get(name);
+ }
+
+ /**
+ * Accessor to images
+ *
+ * @param name
+ * @return
+ */
+ private Image getImageByName(String name) {
+ return this.imageMap.get(name);
+ }
+
+ /**
+ * Access to platform shared images
+ *
+ * @param name
+ * @return
+ */
+ private ImageDescriptor getSharedByName(String name) {
+ return sharedImages.getImageDescriptor(name);
+ }
+
+ /**
+ * Load and register a new image. If the image resource does not exist or
+ * fails to load, a default "error" resource is supplied.
+ *
+ * @param name
+ * name of the image
+ * @param filename
+ * name of the file containing the image
+ * @return whether the image has correctly been loaded
+ */
+ private boolean newImage(String name, String filename) {
+ ImageDescriptor id;
+ boolean success;
+
+ try {
+ URL fileURL = FileLocator.find(bundle, new Path(RESOURCE_DIR + filename), null);
+ id = ImageDescriptor.createFromURL(FileLocator.toFileURL(fileURL));
+ success = true;
+
+ } catch (Exception e) {
+
+ e.printStackTrace();
+ id = ImageDescriptor.getMissingImageDescriptor();
+ // id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+ success = false;
+ }
+
+ descMap.put(name, id);
+ imageMap.put(name, id.createImage(true));
+
+ return success;
+ }
+
+ /**
+ * Register an image from the workspace shared image pool. If the image
+ * resource does not exist or fails to load, a default "error" resource is
+ * supplied.
+ *
+ * @param name
+ * name of the image
+ * @param sharedName
+ * name of the shared image ({@link ISharedImages})
+ * @return whether the image has correctly been loaded
+ */
+ private boolean newSharedImage(String name, String sharedName) {
+ boolean success = true;
+ ImageDescriptor id = getSharedByName(sharedName);
+
+ if (id == null) {
+ id = ImageDescriptor.getMissingImageDescriptor();
+ // id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+ success = false;
+ }
+
+ descMap.put(name, id);
+ imageMap.put(name, id.createImage(true));
+
+ return success;
+ }
+
+ /**
+ * Register an image contributed by another plug-in. If the image
+ * resource does not exist or fails to load, a default "error" resource
+ * is supplied.
+ *
+ * @param name
+ * name of the image
+ * @param pluginId
+ * identifier of the plug-in contributing the image
+ * @param filename
+ * path of the image file within that plug-in
+ * @return whether the image has correctly been loaded
+ */
+ private boolean newPluginImage(String name, String pluginId, String filename) {
+
+ boolean success = true;
+ ImageDescriptor id = AbstractUIPlugin.imageDescriptorFromPlugin(pluginId, filename);
+
+ if (id == null) {
+ id = ImageDescriptor.getMissingImageDescriptor();
+ // id = getSharedByName(ISharedImages.IMG_OBJS_ERROR_TSK);
+ success = false;
+ }
+
+ descMap.put(name, id);
+ imageMap.put(name, id.createImage(true));
+
+ return success;
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
index 9424a45..f0d01f8 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/HDFSLightweightLabelDecorator.java
@@ -111,7 +111,7 @@
String userId = server.getUserId();
if (userId == null) {
try {
- userId = hdfsManager.getClient(serverUrl).getDefaultUserAndGroupIds().get(0);
+ userId = hdfsManager.getClient(serverUrl,server.getVersion()).getDefaultUserAndGroupIds().get(0);
} catch (Throwable e) {
userId = null;
}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
index f5eca4d..cf49d40 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSServerWizardPage.java
@@ -24,8 +24,10 @@
import java.util.List;
import java.util.StringTokenizer;
+import org.apache.hdt.core.HadoopVersion;
import org.apache.hdt.core.hdfs.HDFSClient;
import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.launch.ConfProp;
import org.apache.hdt.ui.Activator;
import org.apache.log4j.Logger;
import org.eclipse.core.runtime.CoreException;
@@ -41,8 +43,10 @@
import org.eclipse.swt.widgets.Combo;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Group;
import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Text;
public class NewHDFSServerWizardPage extends WizardPage {
@@ -55,6 +59,8 @@
private String hdfsServerName = null;
private boolean overrideDefaultSecurity = false;
private String userId = null;
+ private Combo hdfsVersionOptions;
+ private String hdfsVersion;
private List<String> groupIds = new ArrayList<String>();
protected NewHDFSServerWizardPage() {
@@ -115,6 +121,31 @@
Label exampleLabel = new Label(c, SWT.NONE);
exampleLabel.setText("Example: hdfs://hdfs.server.hostname:8020");
exampleLabel.setForeground(Display.getCurrent().getSystemColor(SWT.COLOR_DARK_GRAY));
+
+ /*
+ * HDFS version
+ */
+ {
+ Label label = new Label(c, SWT.NONE);
+ label.setText("&HDFS Version:");
+ Combo options = new Combo(c, SWT.SINGLE | SWT.BORDER | SWT.READ_ONLY);
+ options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+ for (HadoopVersion ver : HadoopVersion.values()) {
+ options.add(ver.getDisplayName());
+ }
+ options.addListener(SWT.Selection, new Listener() {
+
+ @Override
+ public void handleEvent(Event arg0) {
+ hdfsVersion = hdfsVersionOptions.getText();
+ }
+
+ });
+ options.select(0);
+ hdfsVersion = options.getItem(0);
+ hdfsVersionOptions = options;
+ }
+
// Security
Group securityGroup = new Group(c, SWT.SHADOW_ETCHED_IN);
GridData gd = new GridData(GridData.FILL_HORIZONTAL);
@@ -191,7 +222,7 @@
private List<String> getUserAndGroupIds() {
List<String> list = new ArrayList<String>();
try {
- HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation);
+ HDFSClient client = HDFSManager.INSTANCE.getClient(hdfsServerLocation,ConfProp.PI_HADOOP_VERSION.defVal);
List<String> defaultUserAndGroupIds = client.getDefaultUserAndGroupIds();
if (defaultUserAndGroupIds != null)
list.addAll(defaultUserAndGroupIds);
@@ -239,4 +270,8 @@
public List<String> getGroupIds() {
return groupIds;
}
+
+ public String getHDFSVersion() {
+ return hdfsVersion;
+ }
}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
index 545ea3a..e66c9c4 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/hdfs/NewHDFSWizard.java
@@ -17,25 +17,26 @@
*/
package org.apache.hdt.ui.internal.hdfs;
-import java.net.URI;
-import java.net.URISyntaxException;
-
import org.apache.hdt.core.internal.hdfs.HDFSManager;
import org.apache.hdt.ui.Activator;
import org.apache.log4j.Logger;
import org.eclipse.core.runtime.CoreException;
-import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.jface.preference.IPreferenceStore;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.INewWizard;
import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
-public class NewHDFSWizard extends Wizard implements INewWizard {
+public class NewHDFSWizard extends Wizard implements INewWizard,IExecutableExtension {
private static Logger logger = Logger.getLogger(NewHDFSWizard.class);
private NewHDFSServerWizardPage serverLocationWizardPage = null;
+ private IConfigurationElement configElement;
public NewHDFSWizard() {
// TODO Auto-generated constructor stub
@@ -61,6 +62,11 @@
@Override
public boolean performFinish() {
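+ // Switch the workbench to the wizard's declared final perspective (the
+ // finalPerspective attribute in plugin.xml) before scheduling the job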
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ BasicNewProjectResourceWizard.updatePerspective(configElement);
+ }
+ });
if (serverLocationWizardPage != null) {
String ambariUrl = serverLocationWizardPage.getHdfsServerLocation();
if (ambariUrl != null) {
@@ -73,17 +79,10 @@
Job j = new Job("Creating HDFS project [" + serverLocationWizardPage.getHdfsServerName() + "]") {
protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
- try {
- HDFSManager.INSTANCE.createServer(serverLocationWizardPage.getHdfsServerName(), new URI(serverLocationWizardPage
- .getHdfsServerLocation()), serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId()
- : null, serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null);
- } catch (CoreException e) {
- logger.warn(e.getMessage(), e);
- return e.getStatus();
- } catch (URISyntaxException e) {
- logger.warn(e.getMessage(), e);
- }
- return Status.OK_STATUS;
+ return HDFSManager.addServer(serverLocationWizardPage.getHdfsServerName(),serverLocationWizardPage.getHdfsServerLocation(),
+ serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getUserId() : null,
+ serverLocationWizardPage.isOverrideDefaultSecurity() ? serverLocationWizardPage.getGroupIds() : null,
+ serverLocationWizardPage.getHDFSVersion());
};
};
j.schedule();
@@ -92,5 +91,14 @@
}
return false;
}
+ /* (non-Javadoc)
+ * @see org.eclipse.core.runtime.IExecutableExtension#setInitializationData
+ * (org.eclipse.core.runtime.IConfigurationElement, java.lang.String, java.lang.Object)
+ */
+ @Override
+ public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+ this.configElement=config;
+ }
+
}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
new file mode 100644
index 0000000..4cc03d4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopApplicationLaunchShortcut.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Logger;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.debug.core.ILaunchConfiguration;
+import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
+import org.eclipse.jdt.core.IJavaProject;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.core.JavaCore;
+import org.eclipse.jdt.debug.ui.launchConfigurations.JavaApplicationLaunchShortcut;
+import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
+import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
+import org.eclipse.jdt.launching.JavaRuntime;
+import org.eclipse.jface.wizard.IWizard;
+import org.eclipse.jface.wizard.WizardDialog;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Shell;
+
+/**
+ * Add a shortcut "Run on Hadoop" to the Run menu
+ */
+
+public class HadoopApplicationLaunchShortcut extends JavaApplicationLaunchShortcut {
+
+ static Logger log = Logger.getLogger(HadoopApplicationLaunchShortcut.class.getName());
+
+ // private ActionDelegate delegate = new RunOnHadoopActionDelegate();
+
+ public HadoopApplicationLaunchShortcut() {
+ }
+
+ /* @inheritDoc */
+ @Override
+ protected ILaunchConfiguration createConfiguration(IType type) {
+
+ ILaunchConfiguration iConf = super.createConfiguration(type);
+ ILaunchConfigurationWorkingCopy iConfWC;
+ try {
+ /*
+			 * Tune the default launch configuration: set up the run-time
+			 * classpath manually
+ */
+ iConfWC = iConf.getWorkingCopy();
+
+ iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_DEFAULT_CLASSPATH, false);
+
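+			// Build the classpath explicitly: launch configurations persist
+			// classpath entries as memento strings, so the project's default
+			// runtime entry is added in that serialized form.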
+ List<String> classPath = new ArrayList<String>();
+ IResource resource = type.getResource();
+ IJavaProject project = (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+ IRuntimeClasspathEntry cpEntry = JavaRuntime.newDefaultProjectClasspathEntry(project);
+ classPath.add(0, cpEntry.getMemento());
+
+ iConfWC.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, classPath);
+
+ } catch (CoreException e) {
+ e.printStackTrace();
+ // FIXME Error dialog
+ return null;
+ }
+
+ /*
+ * Update the selected configuration with a specific Hadoop location
+ * target
+ */
+ IResource resource = type.getResource();
+ if (!(resource instanceof IFile))
+ return null;
+ RunOnHadoopWizard wizard = new RunOnHadoopWizard((IFile) resource, iConfWC);
+ WizardDialog dialog = new WizardDialog(Display.getDefault().getActiveShell(), wizard);
+
+ dialog.create();
+ dialog.setBlockOnOpen(true);
+ if (dialog.open() != WizardDialog.OK)
+ return null;
+
+ try {
+
+ // Only save if some configuration is different.
+ if (!iConfWC.contentsEqual(iConf))
+ iConfWC.doSave();
+
+ } catch (CoreException e) {
+ e.printStackTrace();
+ // FIXME Error dialog
+ return null;
+ }
+
+ return iConfWC;
+ }
+
+ /**
+	 * Wraps the RunOnHadoopWizard in a dialog that provides it with the
+	 * dialog's progress monitor. Currently unused: createConfiguration()
+	 * opens a plain WizardDialog instead.
+	 */
+ static class Dialog extends WizardDialog {
+ public Dialog(Shell parentShell, IWizard newWizard) {
+ super(parentShell, newWizard);
+ }
+
+ @Override
+ public void create() {
+ super.create();
+
+ ((RunOnHadoopWizard) getWizard()).setProgressMonitor(getProgressMonitor());
+ }
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
new file mode 100644
index 0000000..c21ce79
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopLocationWizard.java
@@ -0,0 +1,378 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hdt.core.HadoopVersion;
+import org.apache.hdt.core.internal.hdfs.HDFSManager;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.ChangeListener;
+import org.apache.hdt.core.launch.AbstractHadoopCluster.HadoopConfigurationBuilder;
+import org.apache.hdt.core.launch.ConfProp;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IWorkspaceRoot;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Combo;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.Text;
+
+/**
+ * Wizard for editing the settings of a Hadoop location
+ *
+ * The wizard contains 3 tabs: General, Tunneling and Advanced. It edits the
+ * parameters of the location member, which is either a new location or a copy
+ * of an existing registered location.
+ */
+
+public class HadoopLocationWizard extends WizardPage {
+
+ Image circle;
+
+ /**
+	 * The location being edited by the wizard: either a brand new location
+	 * or a working copy of an existing registered one.
+ */
+ private AbstractHadoopCluster location;
+
+ /**
+ * The original location being edited by the wizard (null if we create a new
+ * instance).
+ */
+ private AbstractHadoopCluster original;
+ private Text locationName;
+ private Combo hadoopVersion;
+
+ /**
+ * New Hadoop location wizard
+ */
+ public HadoopLocationWizard() {
+ super("Hadoop Server", "New Hadoop Location", null);
+
+ this.original = null;
+ try {
+ this.location = AbstractHadoopCluster.createCluster(HadoopVersion.Version1.getDisplayName());
+ } catch (CoreException e) {
+ e.printStackTrace();
+ }
+ this.location.setLocationName("");
+ }
+
+ /**
+ * Constructor to edit the parameters of an existing Hadoop server
+ *
+ * @param server
+ */
+ public HadoopLocationWizard(AbstractHadoopCluster server) {
+		super("Edit Hadoop location", "Edit Hadoop Location", null);
+ this.original = server;
+ try {
+ this.location = AbstractHadoopCluster.createCluster(server.getVersion().getDisplayName());
+ } catch (CoreException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * Performs any actions appropriate in response to the user having pressed
+	 * the Finish button, or refuses if finishing now is not permitted.
+ *
+ * @return the created or updated Hadoop location
+ */
+
+ public AbstractHadoopCluster performFinish() {
+ try {
+ if (this.original == null) {
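+				// Brand new location: create the HDFS server entry first,
+				// then register the location itself below.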
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ HDFSManager.addServer(location.getLocationName(),
+ location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
+ .getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion().getDisplayName());
+ }
+ });
+				// Register the new location with the server registry
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ ServerRegistry.getInstance().addServer(HadoopLocationWizard.this.location);
+ }
+ });
+ return this.location;
+
+ } else {
+
+ // Update location
+ final String originalName = this.original.getLocationName();
+ final String originalLoc = this.original.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+ final String newName = this.location.getLocationName();
+ final String newLoc = this.location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+
+ if (!originalName.equals(newName) || !originalLoc.equals(newLoc)){
+ IWorkspaceRoot root = ResourcesPlugin.getWorkspace().getRoot();
+ final IProject project = root.getProject(originalName);
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ if(project.exists()){
+ try {
+ project.close(null);
+ project.delete(true, null);
+ } catch (CoreException e) {
+ e.printStackTrace();
+ }
+ }
+ HDFSManager.addServer(location.getLocationName(),
+ location.getConfPropValue(ConfProp.FS_DEFAULT_URI), location
+ .getConfPropValue(ConfProp.PI_USER_NAME), null,location.getVersion().getDisplayName());
+ }
+ });
+ }
+ this.original.load(this.location);
+
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ ServerRegistry.getInstance().updateServer(originalName, HadoopLocationWizard.this.location);
+ }
+ });
+ return this.original;
+
+ }
+
+
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ setMessage("Invalid server location values", IMessageProvider.ERROR);
+ return null;
+ }
+ }
+
+ /**
+ * Validates the current Hadoop location settings (look for Hadoop
+ * installation directory).
+ *
+ */
+ private void testLocation() {
+ setMessage("Not implemented yet", IMessageProvider.WARNING);
+ }
+
+ /**
+ * Location is not complete (and finish button not available) until a host
+ * name is specified.
+ *
+ * @inheritDoc
+ */
+ @Override
+ public boolean isPageComplete() {
+
+ {
+ String locName = location.getConfPropValue(ConfProp.PI_LOCATION_NAME);
+ if ((locName == null) || (locName.length() == 0) || locName.contains("/")) {
+
+ setMessage("Bad location name: " + "the location name should not contain " + "any character prohibited in a file name.", WARNING);
+
+ return false;
+ }
+ }
+
+ {
+ String master = location.getConfPropValue(ConfProp.PI_JOB_TRACKER_HOST);
+ if ((master == null) || (master.length() == 0)) {
+
+ setMessage("Bad master host name: " + "the master host name refers to the machine " + "that runs the Job tracker.", WARNING);
+
+ return false;
+ }
+ }
+
+ {
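+			// the job tracker URI must be host:port with a valid TCP port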
+ String jobTracker = location.getConfPropValue(ConfProp.JOB_TRACKER_URI);
+ String[] strs = jobTracker.split(":");
+ boolean ok = (strs.length == 2);
+ if (ok) {
+ try {
+ int port = Integer.parseInt(strs[1]);
+ ok = (port >= 0) && (port < 65536);
+ } catch (NumberFormatException nfe) {
+ ok = false;
+ }
+ }
+ if (!ok) {
+ setMessage("The job tracker information is invalid. " + "This usually looks like \"host:port\"",
+ WARNING);
+ return false;
+ }
+ }
+
+ {
+ String fsDefaultURI = location.getConfPropValue(ConfProp.FS_DEFAULT_URI);
+			try {
+				new URI(fsDefaultURI);
+			} catch (URISyntaxException e) {
+				setMessage("The default file system URI is invalid. " + "This usually looks like \"hdfs://host:port/\" " + "or \"file:///dir/\"", WARNING);
+				return false;
+			}
+ }
+
+ setMessage("Define the location of a Hadoop infrastructure " + "for running MapReduce applications.");
+ return true;
+ }
+
+ /**
+ * Create the wizard
+ */
+ /* @inheritDoc */
+ public void createControl(final Composite parent) {
+ setTitle("Define Hadoop location");
+ setDescription("Define the location of a Hadoop infrastructure " + "for running MapReduce applications.");
+
+ final Composite panel = new Composite(parent, SWT.FILL);
+ GridLayout glayout = new GridLayout(2, false);
+ panel.setLayout(glayout);
+ final HadoopConfigurationBuilder uiConfigurationBuilder = location.getUIConfigurationBuilder();
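+		// refresh the wizard buttons on every property change so the
+		// page completion state is re-evaluated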
+ uiConfigurationBuilder.setChangeListener(new ChangeListener() {
+
+ @Override
+ public void notifyChange(ConfProp prop, String propValue) {
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ getContainer().updateButtons();
+ }});
+ }
+ });
+ /*
+ * Location name
+ */
+ {
+ Label label = new Label(panel, SWT.NONE);
+ label.setText( "&Location name:");
+ Text text = new Text(panel, SWT.SINGLE | SWT.BORDER);
+ GridData data = new GridData(GridData.FILL_HORIZONTAL);
+ text.setLayoutData(data);
+ text.setText(location.getConfPropValue(ConfProp.PI_LOCATION_NAME));
+ text.addModifyListener(new ModifyListener() {
+ @Override
+ public void modifyText(ModifyEvent e) {
+ final Text text = (Text) e.widget;
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ uiConfigurationBuilder.notifyChange(ConfProp.PI_LOCATION_NAME,text.getText());
+ }
+ });
+ }
+ });
+ locationName=text;
+ }
+ /*
+ * Hadoop version
+ */
+ {
+ Label label = new Label(panel, SWT.NONE);
+ label.setText("&Hadoop Version:");
+ Combo options = new Combo(panel, SWT.BORDER | SWT.READ_ONLY);
+ for(HadoopVersion ver:HadoopVersion.values()){
+ options.add(ver.getDisplayName());
+ }
+ int pos=0;
+ for(String item:options.getItems()){
+ if(item.equalsIgnoreCase(location.getVersion().getDisplayName())){
+ options.select(pos);
+ break;
+ }
+ pos++;
+ }
+ options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+ options.addListener(SWT.Selection, new Listener() {
+ @Override
+ public void handleEvent(Event event) {
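+					// switching the version rebuilds the whole page, since the
+					// configuration controls come from the version-specific builder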
+ final String selection = hadoopVersion.getText();
+					if (location == null || !selection.equals(location.getVersion().getDisplayName())) {
+ Display.getDefault().syncExec(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ location = AbstractHadoopCluster.createCluster(selection);
+ location.setConfPropValue(ConfProp.PI_HADOOP_VERSION, selection);
+ location.setConfPropValue(ConfProp.PI_LOCATION_NAME, locationName.getText());
+ panel.dispose();
+ createControl(parent);
+ parent.pack();
+ parent.getParent().layout(true);
+ } catch (CoreException e) {
+ MessageDialog.openError(Display.getDefault().getActiveShell(), "HDFS Error", "Unable to create HDFS site :"
+ + e.getMessage());
+ }
+ }
+ });
+ }
+
+ }
+ });
+ hadoopVersion=options;
+ }
+ {
+ uiConfigurationBuilder.buildControl(panel);
+ }
+
+ {
+ final Button btn = new Button(panel, SWT.NONE);
+ btn.setText("&Load from file");
+ btn.setEnabled(false);
+ btn.setToolTipText("Not yet implemented");
+ btn.addListener(SWT.Selection, new Listener() {
+ public void handleEvent(Event e) {
+ // TODO
+ }
+ });
+ }
+ {
+ final Button validate = new Button(panel, SWT.NONE);
+ validate.setText("&Validate location");
+ validate.setEnabled(false);
+ validate.setToolTipText("Not yet implemented");
+ validate.addListener(SWT.Selection, new Listener() {
+ public void handleEvent(Event e) {
+ testLocation();
+ }
+ });
+ }
+
+ this.setControl(panel);
+ }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java
new file mode 100644
index 0000000..1f854d0
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/HadoopServerSelectionListContentProvider.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.eclipse.jface.viewers.IContentProvider;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.IStructuredContentProvider;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.graphics.Image;
+
+/**
+ * Provider that enables selection of a predefined Hadoop server.
+ */
+
+public class HadoopServerSelectionListContentProvider implements
+ IContentProvider, ITableLabelProvider, IStructuredContentProvider {
+ public void dispose() {
+
+ }
+
+ public void inputChanged(Viewer viewer, Object oldInput, Object newInput) {
+
+ }
+
+ public Image getColumnImage(Object element, int columnIndex) {
+ return null;
+ }
+
+ public String getColumnText(Object element, int columnIndex) {
+ if (element instanceof AbstractHadoopCluster) {
+ AbstractHadoopCluster location = (AbstractHadoopCluster) element;
+ if (columnIndex == 0) {
+ return location.getLocationName();
+
+ } else if (columnIndex == 1) {
+ return location.getMasterHostName();
+ }
+ }
+
+ return element.toString();
+ }
+
+ public void addListener(ILabelProviderListener listener) {
+
+ }
+
+ public boolean isLabelProperty(Object element, String property) {
+ return false;
+ }
+
+ public void removeListener(ILabelProviderListener listener) {
+
+ }
+
+ public Object[] getElements(Object inputElement) {
+ return ServerRegistry.getInstance().getServers().toArray();
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java
new file mode 100644
index 0000000..a494baa
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/JarModule.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.launch.ErrorMessageDialog;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.ICompilationUnit;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
+import org.eclipse.jdt.ui.jarpackager.JarPackageData;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.PlatformUI;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public class JarModule implements IJarModule {
+
+ static Logger log = Logger.getLogger(JarModule.class.getName());
+
+ private IResource resource;
+
+ private File jarFile;
+
+ public JarModule(IResource resource) {
+ this.resource = resource;
+ }
+
+ public String getName() {
+ return resource.getProject().getName() + "/" + resource.getName();
+ }
+
+	/**
+	 * Creates a JAR file containing the resource (a Java class with a
+	 * main() method) and all associated resources, and stores the result
+	 * in {@link #jarFile}.
+	 */
+ public void run(IProgressMonitor monitor) {
+
+ log.fine("Build jar");
+ JarPackageData jarrer = new JarPackageData();
+
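+		// export sources, classes and output folders, overwriting any
+		// previously generated JAR at the same location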
+ jarrer.setExportJavaFiles(true);
+ jarrer.setExportClassFiles(true);
+ jarrer.setExportOutputFolders(true);
+ jarrer.setOverwrite(true);
+
+ try {
+ // IJavaProject project =
+ // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+
+			// the resource is expected to adapt to a compilation unit with a
+			// primary type; callers must ensure this before run() is invoked
+ Object element = resource.getAdapter(IJavaElement.class);
+ IType type = ((ICompilationUnit) element).findPrimaryType();
+ jarrer.setManifestMainClass(type);
+
+ // Create a temporary JAR file name
+ File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+ String prefix =
+ String.format("%s_%s-", resource.getProject().getName(), resource
+ .getName());
+ File jarFile = File.createTempFile(prefix, ".jar", baseDir);
+ jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
+
+ jarrer.setElements(resource.getProject().members(IResource.FILE));
+ IJarExportRunnable runnable =
+ jarrer.createJarExportRunnable(Display.getDefault()
+ .getActiveShell());
+ runnable.run(monitor);
+
+ this.jarFile = jarFile;
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Allow the retrieval of the resulting JAR file
+ *
+ * @return the generated JAR file
+ */
+ public File getJarFile() {
+ return this.jarFile;
+ }
+
+	/**
+	 * Creates a JAR package for the given resource while showing a progress
+	 * bar
+	 *
+	 * @param resource the resource to package
+	 * @return the generated JAR file, or null if the packaging failed
+	 */
+ public static File createJarPackage(IResource resource) {
+
+ JarModule jarModule = new JarModule(resource);
+ try {
+ PlatformUI.getWorkbench().getProgressService().run(false, true,
+ jarModule);
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ return null;
+ }
+
+ File jarFile = jarModule.getJarFile();
+ if (jarFile == null) {
+ ErrorMessageDialog.display("Run on Hadoop",
+ "Unable to create or locate the JAR file for the Job");
+ return null;
+ }
+
+ return jarFile;
+ }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java
new file mode 100644
index 0000000..fd9f465
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/RunOnHadoopWizard.java
@@ -0,0 +1,346 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hdt.core.launch.ErrorMessageDialog;
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IPath;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy;
+import org.eclipse.jdt.launching.IJavaLaunchConfigurationConstants;
+import org.eclipse.jdt.launching.IRuntimeClasspathEntry;
+import org.eclipse.jdt.launching.JavaRuntime;
+import org.eclipse.jface.viewers.TableViewer;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.FillLayout;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Table;
+import org.eclipse.swt.widgets.TableColumn;
+import org.eclipse.swt.widgets.Text;
+
+/**
+ * Wizard for publishing a job to a Hadoop server.
+ */
+
+public class RunOnHadoopWizard extends Wizard {
+
+ private MainWizardPage mainPage;
+
+ private HadoopLocationWizard createNewPage;
+
+ /**
+ * The file resource (containing a main()) to run on the Hadoop location
+ */
+ private IFile resource;
+
+ /**
+ * The launch configuration to update
+ */
+ private ILaunchConfigurationWorkingCopy iConf;
+
+ private IProgressMonitor progressMonitor;
+
+ public RunOnHadoopWizard(IFile resource, ILaunchConfigurationWorkingCopy iConf) {
+ this.resource = resource;
+ this.iConf = iConf;
+ setForcePreviousAndNextButtons(true);
+ setNeedsProgressMonitor(true);
+ setWindowTitle("Run on Hadoop");
+ }
+
+ /**
+	 * This wizard contains 2 pages:
+	 * <ul>
+	 * <li>the first one lets the user choose an already existing location</li>
+	 * <li>the second one lets the user create a new location if none exists yet</li>
+	 * </ul>
+ */
+ /* @inheritDoc */
+ @Override
+ public void addPages() {
+ addPage(this.mainPage = new MainWizardPage());
+ addPage(this.createNewPage = new HadoopLocationWizard());
+ }
+
+ /**
+ * Performs any actions appropriate in response to the user having pressed
+	 * the Finish button, or refuses if finishing now is not permitted.
+ */
+ /* @inheritDoc */
+ @Override
+ public boolean performFinish() {
+
+ /*
+ * Create a new location or get an existing one
+ */
+ AbstractHadoopCluster location = null;
+ if (mainPage.createNew.getSelection()) {
+ location = createNewPage.performFinish();
+
+ } else if (mainPage.table.getSelection().length == 1) {
+ location = (AbstractHadoopCluster) mainPage.table.getSelection()[0].getData();
+ }
+
+ if (location == null)
+ return false;
+
+ /*
+ * Get the base directory of the plug-in for storing configurations and
+ * JARs
+ */
+ File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+ // Package the Job into a JAR
+ File jarFile = JarModule.createJarPackage(resource);
+ if (jarFile == null) {
+ ErrorMessageDialog.display("Run on Hadoop", "Unable to create or locate the JAR file for the Job");
+ return false;
+ }
+
+ /*
+ * Generate a temporary Hadoop configuration directory and add it to the
+ * classpath of the launch configuration
+ */
+
+ File confDir;
+ try {
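+			// createTempFile only reserves a unique name; the file is
+			// deleted and re-created as a directory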
+ confDir = File.createTempFile("hadoop-conf-", "", baseDir);
+ confDir.delete();
+ confDir.mkdirs();
+ if (!confDir.isDirectory()) {
+ ErrorMessageDialog.display("Run on Hadoop", "Cannot create temporary directory: " + confDir);
+ return false;
+ }
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ return false;
+ }
+ try {
+ location.saveConfiguration(confDir, jarFile.getAbsolutePath());
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ return false;
+ }
+ // Setup the Launch class path
+ List<String> classPath;
+ try {
+			classPath = iConf.getAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, new ArrayList<String>());
+ IPath confIPath = new Path(confDir.getAbsolutePath());
+ IRuntimeClasspathEntry cpEntry = JavaRuntime.newArchiveRuntimeClasspathEntry(confIPath);
+ classPath.add(0, cpEntry.getMemento());
+ iConf.setAttribute(IJavaLaunchConfigurationConstants.ATTR_CLASSPATH, classPath);
+ iConf.setAttribute(IJavaLaunchConfigurationConstants.ATTR_PROGRAM_ARGUMENTS, mainPage.argumentsText.getText());
+
+ } catch (CoreException e) {
+ e.printStackTrace();
+ return false;
+ }
+
+ // location.runResource(resource, progressMonitor);
+ return true;
+ }
+
+ private void refreshButtons() {
+ getContainer().updateButtons();
+ }
+
+ /**
+ * Allows finish when an existing server is selected or when a new server
+ * location is defined
+ */
+ /* @inheritDoc */
+ @Override
+ public boolean canFinish() {
+ if (mainPage != null)
+ return mainPage.canFinish();
+ return false;
+ }
+
+ /**
+	 * This is the main page of the wizard. It lets the user either choose an
+	 * already existing location or indicate that a new location should be
+	 * created.
+ */
+ public class MainWizardPage extends WizardPage {
+
+ private Button createNew;
+
+ private Table table;
+ private Text argumentsText;
+
+ private Button chooseExisting;
+
+ public MainWizardPage() {
+ super("Select or define server to run on");
+ setTitle("Select Hadoop location");
+ setDescription("Select a Hadoop location to run on.");
+ }
+
+ /* @inheritDoc */
+ @Override
+ public boolean canFlipToNextPage() {
+ return createNew.getSelection();
+ }
+
+ /* @inheritDoc */
+ public void createControl(Composite parent) {
+ Composite panel = new Composite(parent, SWT.NONE);
+ panel.setLayout(new GridLayout(1, false));
+
+ // Label
+ Label label = new Label(panel, SWT.NONE);
+ label.setText("Select a Hadoop Server to run on.");
+ GridData gData = new GridData(GridData.FILL_BOTH);
+ gData.grabExcessVerticalSpace = false;
+ label.setLayoutData(gData);
+
+ // Create location button
+ createNew = new Button(panel, SWT.RADIO);
+ createNew.setText("Define a new Hadoop server location");
+			// a GridData instance must not be shared between controls
+			gData = new GridData(GridData.FILL_BOTH);
+			gData.grabExcessVerticalSpace = false;
+			createNew.setLayoutData(gData);
+ createNew.addSelectionListener(new SelectionListener() {
+ public void widgetDefaultSelected(SelectionEvent e) {
+ }
+
+ public void widgetSelected(SelectionEvent e) {
+ setPageComplete(true);
+ RunOnHadoopWizard.this.refreshButtons();
+ }
+ });
+ createNew.setSelection(true);
+
+ // Select existing location button
+ chooseExisting = new Button(panel, SWT.RADIO);
+ chooseExisting.setText("Choose an existing server from the list below");
+			gData = new GridData(GridData.FILL_BOTH);
+			gData.grabExcessVerticalSpace = false;
+			chooseExisting.setLayoutData(gData);
+ chooseExisting.addSelectionListener(new SelectionListener() {
+ public void widgetDefaultSelected(SelectionEvent e) {
+ }
+
+ public void widgetSelected(SelectionEvent e) {
+ if (chooseExisting.getSelection() && (table.getSelectionCount() == 0)) {
+ if (table.getItems().length > 0) {
+ table.setSelection(0);
+ }
+ }
+ RunOnHadoopWizard.this.refreshButtons();
+ }
+ });
+
+ // Table of existing locations
+ Composite serverListPanel = new Composite(panel, SWT.FILL);
+ gData = new GridData(GridData.FILL_BOTH);
+ gData.horizontalSpan = 1;
+ serverListPanel.setLayoutData(gData);
+
+ FillLayout layout = new FillLayout();
+ layout.marginHeight = layout.marginWidth = 12;
+ serverListPanel.setLayout(layout);
+
+ table = new Table(serverListPanel, SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL | SWT.FULL_SELECTION);
+ table.setHeaderVisible(true);
+ table.setLinesVisible(true);
+
+ TableColumn nameColumn = new TableColumn(table, SWT.LEFT);
+ nameColumn.setText("Location");
+ nameColumn.setWidth(450);
+
+ TableColumn hostColumn = new TableColumn(table, SWT.LEFT);
+ hostColumn.setText("Master host name");
+ hostColumn.setWidth(250);
+
+ // If the user select one entry, switch to "chooseExisting"
+ table.addSelectionListener(new SelectionListener() {
+ public void widgetDefaultSelected(SelectionEvent e) {
+ }
+
+ public void widgetSelected(SelectionEvent e) {
+ chooseExisting.setSelection(true);
+ createNew.setSelection(false);
+ setPageComplete(table.getSelectionCount() == 1);
+ RunOnHadoopWizard.this.refreshButtons();
+ }
+ });
+
+ // Label
+ Label argumentsLabel = new Label(panel, SWT.NONE);
+ argumentsLabel.setText("Arguments:");
+ GridData gDataArgumentsLabel = new GridData(GridData.FILL_BOTH);
+ gDataArgumentsLabel.grabExcessVerticalSpace = false;
+ argumentsLabel.setLayoutData(gDataArgumentsLabel);
+
+ // Textbox
+ argumentsText = new Text(panel, SWT.NONE);
+ try {
+ argumentsText.setText(iConf.getAttribute(IJavaLaunchConfigurationConstants.ATTR_PROGRAM_ARGUMENTS, ""));
+ } catch (CoreException e1) {
+ e1.printStackTrace();
+ }
+ GridData gDataArgumentsText = new GridData(GridData.FILL_BOTH);
+ gDataArgumentsText.grabExcessVerticalSpace = false;
+ argumentsText.setLayoutData(gDataArgumentsText);
+
+ TableViewer viewer = new TableViewer(table);
+ HadoopServerSelectionListContentProvider provider = new HadoopServerSelectionListContentProvider();
+ viewer.setContentProvider(provider);
+ viewer.setLabelProvider(provider);
+ viewer.setInput(new Object());
+			// the actual input is ignored; content comes from the singleton ServerRegistry
+
+ this.setControl(panel);
+ }
+
+ /**
+ * Returns whether this page state allows the Wizard to finish or not
+ *
+ * @return can the wizard finish or not?
+ */
+ public boolean canFinish() {
+ if (!isControlCreated())
+ return false;
+
+ if (this.createNew.getSelection())
+ return getNextPage().isPageComplete();
+
+ return this.chooseExisting.getSelection();
+ }
+ }
+
+ /**
+ * @param progressMonitor
+ */
+ public void setProgressMonitor(IProgressMonitor progressMonitor) {
+ this.progressMonitor = progressMonitor;
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
new file mode 100644
index 0000000..f03fb50
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/launch/ServerRegistry.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.launch;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopClusterListener;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.resources.WorkspaceJob;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.jface.dialogs.MessageDialog;
+
+/**
+ * Register of Hadoop locations.
+ *
+ * Each location corresponds to a Hadoop configuration stored as an XML
+ * file in the workspace plug-in configuration directory:
+ * <p>
+ * <tt>
+ * &lt;workspace-dir&gt;/.metadata/.plugins/org.apache.hadoop.eclipse/locations/*.xml
+ * </tt>
+ *
+ */
+public class ServerRegistry {
+
+ private static final ServerRegistry INSTANCE = new ServerRegistry();
+
+ public static final int SERVER_ADDED = 0;
+
+ public static final int SERVER_REMOVED = 1;
+
+ public static final int SERVER_STATE_CHANGED = 2;
+
+ private final File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+ private final File saveDir = new File(baseDir, "locations");
+
+ private ServerRegistry() {
+ if (saveDir.exists() && !saveDir.isDirectory())
+ saveDir.delete();
+ if (!saveDir.exists())
+ saveDir.mkdirs();
+
+ load();
+ }
+
+ private Map<String, AbstractHadoopCluster> servers;
+
+ private Set<IHadoopClusterListener> listeners = new HashSet<IHadoopClusterListener>();
+
+ public static ServerRegistry getInstance() {
+ return INSTANCE;
+ }
+
+ public synchronized Collection<AbstractHadoopCluster> getServers() {
+ return Collections.unmodifiableCollection(servers.values());
+ }
+
+ /**
+ * Load all available locations from the workspace configuration directory.
+ */
+ private synchronized void load() {
+ Map<String, AbstractHadoopCluster> map = new TreeMap<String, AbstractHadoopCluster>();
+ for (File file : saveDir.listFiles()) {
+ try {
+ AbstractHadoopCluster server = AbstractHadoopCluster.createCluster(file);
+ map.put(server.getLocationName(), server);
+
+ } catch (Exception exn) {
+ System.err.println(exn);
+ }
+ }
+ this.servers = map;
+ }
+
+ private synchronized void store() {
+ try {
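+			// write all locations into a fresh directory, then swap it in,
+			// keeping the previous save directory as locations.backup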
+ File dir = File.createTempFile("locations", "new", baseDir);
+ dir.delete();
+ dir.mkdirs();
+
+ for (AbstractHadoopCluster server : servers.values()) {
+ server.storeSettingsToFile(new File(dir, server.getLocationName() + ".xml"));
+ }
+
+ FilenameFilter XMLFilter = new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ String lower = name.toLowerCase();
+ return lower.endsWith(".xml");
+ }
+ };
+
+ File backup = new File(baseDir, "locations.backup");
+ if (backup.exists()) {
+ for (File file : backup.listFiles(XMLFilter))
+ if (!file.delete())
+ throw new IOException("Unable to delete backup location file: " + file);
+ if (!backup.delete())
+ throw new IOException("Unable to delete backup location directory: " + backup);
+ }
+
+ saveDir.renameTo(backup);
+ dir.renameTo(saveDir);
+
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ MessageDialog.openError(null, "Saving configuration of Hadoop locations failed", ioe.toString());
+ }
+ }
+
+ public void dispose() {
+ for (AbstractHadoopCluster server : getServers()) {
+ server.dispose();
+ }
+ }
+
+ public synchronized AbstractHadoopCluster getServer(String location) {
+ return servers.get(location);
+ }
+
+ /*
+ * HadoopServer map listeners
+ */
+
+ public void addListener(IHadoopClusterListener l) {
+ synchronized (listeners) {
+ listeners.add(l);
+ }
+ }
+
+ public void removeListener(IHadoopClusterListener l) {
+ synchronized (listeners) {
+ listeners.remove(l);
+ }
+ }
+
+ private void fireListeners(AbstractHadoopCluster location, int kind) {
+ synchronized (listeners) {
+ for (IHadoopClusterListener listener : listeners) {
+ listener.serverChanged(location, kind);
+ }
+ }
+ }
+
+ public synchronized void removeServer(AbstractHadoopCluster server) {
+ this.servers.remove(server.getLocationName());
+ store();
+ fireListeners(server, SERVER_REMOVED);
+ }
+
+ public synchronized void addServer(final AbstractHadoopCluster server) {
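+		// isAvailable() may need to reach the cluster, so the registration
+		// runs inside a workspace job rather than blocking the caller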
+ WorkspaceJob job= new WorkspaceJob("Adding Hadoop Server") {
+ @Override
+ public IStatus runInWorkspace(IProgressMonitor monitor) throws CoreException {
+ if(server.isAvailable()){
+ servers.put(server.getLocationName(), server);
+ store();
+ fireListeners(server, SERVER_ADDED);
+ }
+ return org.eclipse.core.runtime.Status.OK_STATUS;
+ }};
+
+ job.setPriority(Job.LONG);
+ job.setRule(ResourcesPlugin.getWorkspace().getRoot());
+ job.setUser(true);
+ job.schedule();
+ }
+
+ /**
+ * Update one Hadoop location
+ *
+ * @param originalName
+ * the original location name (might have changed)
+ * @param server
+ * the location
+ */
+ public synchronized void updateServer(final String originalName, final AbstractHadoopCluster server) {
+ WorkspaceJob job= new WorkspaceJob("Updating Hadoop Server") {
+ @Override
+ public IStatus runInWorkspace(IProgressMonitor monitor) throws CoreException {
+ // Update the map if the location name has changed
+ if (!server.getLocationName().equals(originalName) && server.isAvailable()) {
+ servers.remove(originalName);
+ servers.put(server.getLocationName(), server);
+ store();
+ fireListeners(server, SERVER_STATE_CHANGED);
+ }
+
+ return org.eclipse.core.runtime.Status.OK_STATUS;
+ }};
+
+ job.setPriority(Job.LONG);
+ job.setUser(true);
+ job.schedule();
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
new file mode 100644
index 0000000..7ac0582
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/ClusterView.java
@@ -0,0 +1,415 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import java.util.Collection;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.core.launch.IHadoopClusterListener;
+import org.apache.hdt.core.launch.IHadoopJob;
+import org.apache.hdt.core.launch.IJarModule;
+import org.apache.hdt.core.launch.IJobListener;
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.internal.launch.JarModule;
+import org.apache.hdt.ui.internal.launch.ServerRegistry;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.action.IMenuListener;
+import org.eclipse.jface.action.IMenuManager;
+import org.eclipse.jface.action.MenuManager;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.ISelectionChangedListener;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.ITreeContentProvider;
+import org.eclipse.jface.viewers.ITreeSelection;
+import org.eclipse.jface.viewers.SelectionChangedEvent;
+import org.eclipse.jface.viewers.TreeViewer;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Menu;
+import org.eclipse.swt.widgets.Tree;
+import org.eclipse.swt.widgets.TreeColumn;
+import org.eclipse.ui.IViewSite;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.actions.ActionFactory;
+import org.eclipse.ui.part.ViewPart;
+
+/**
+ * Map/Reduce locations view: displays all available Hadoop locations and the
+ * Jobs running/finished on these locations
+ */
+public class ClusterView extends ViewPart implements ITreeContentProvider, ITableLabelProvider, IJobListener, IHadoopClusterListener {
+
+ /**
+ * Deletion action: delete a Hadoop location, kill a running job or remove a
+ * finished job entry
+ */
+ class DeleteAction extends Action {
+
+ DeleteAction() {
+ setText("Delete");
+ setImageDescriptor(ImageLibrary.get("server.view.action.delete"));
+ }
+
+ /* @inheritDoc */
+ @Override
+ public void run() {
+ ISelection selection = getViewSite().getSelectionProvider().getSelection();
+ if ((selection != null) && (selection instanceof IStructuredSelection)) {
+ Object selItem = ((IStructuredSelection) selection).getFirstElement();
+
+ if (selItem instanceof AbstractHadoopCluster) {
+ AbstractHadoopCluster location = (AbstractHadoopCluster) selItem;
+ if (MessageDialog.openConfirm(Display.getDefault().getActiveShell(), "Confirm delete Hadoop location",
+ "Do you really want to remove the Hadoop location: " + location.getLocationName())) {
+ ServerRegistry.getInstance().removeServer(location);
+ }
+
+ } else if (selItem instanceof IHadoopJob) {
+
+ // kill the job
+ IHadoopJob job = (IHadoopJob) selItem;
+ if (job.isCompleted()) {
+ // Job already finished, remove the entry
+ job.getLocation().purgeJob(job);
+
+ } else {
+ // Job is running, kill the job?
+ if (MessageDialog.openConfirm(Display.getDefault().getActiveShell(), "Confirm kill running Job",
+ "Do you really want to kill running Job: " + job.getJobID())) {
+ job.kill();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * This object is the root content for this content provider
+ */
+ private static final Object CONTENT_ROOT = new Object();
+
+ private final IAction deleteAction = new DeleteAction();
+
+ private final IAction editServerAction = new EditLocationAction(this);
+
+ private final IAction newLocationAction = new NewLocationAction();
+
+ private TreeViewer viewer;
+
+ public ClusterView() {
+ }
+
+ /* @inheritDoc */
+ @Override
+ public void init(IViewSite site) throws PartInitException {
+ super.init(site);
+ }
+
+ /* @inheritDoc */
+ @Override
+ public void dispose() {
+		ServerRegistry.getInstance().removeListener(this);
+		super.dispose();
+	}
+
+ /**
+ * Creates the columns for the view
+ */
+ @Override
+ public void createPartControl(Composite parent) {
+ Tree main = new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL | SWT.V_SCROLL);
+ main.setHeaderVisible(true);
+ main.setLinesVisible(false);
+ main.setLayoutData(new GridData(GridData.FILL_BOTH));
+
+ TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE);
+ serverCol.setText("Location");
+ serverCol.setWidth(300);
+ serverCol.setResizable(true);
+
+ TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE);
+ locationCol.setText("Master node");
+ locationCol.setWidth(185);
+ locationCol.setResizable(true);
+
+ TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE);
+ stateCol.setText("State");
+ stateCol.setWidth(95);
+ stateCol.setResizable(true);
+
+ TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE);
+ statusCol.setText("Status");
+ statusCol.setWidth(300);
+ statusCol.setResizable(true);
+
+ viewer = new TreeViewer(main);
+ viewer.setContentProvider(this);
+ viewer.setLabelProvider(this);
+		viewer.setInput(CONTENT_ROOT); // the input itself is ignored; see getElements()
+
+ getViewSite().setSelectionProvider(viewer);
+
+ getViewSite().getActionBars().setGlobalActionHandler(ActionFactory.DELETE.getId(), deleteAction);
+ getViewSite().getActionBars().getToolBarManager().add(editServerAction);
+ getViewSite().getActionBars().getToolBarManager().add(newLocationAction);
+ createContextMenu();
+ }
+
+
+ /**
+ * Contextual menu
+ */
+ private void createContextMenu() {
+ // Create menu manager.
+ MenuManager menuMgr = new MenuManager();
+ menuMgr.setRemoveAllWhenShown(true);
+ menuMgr.addMenuListener(new IMenuListener() {
+ public void menuAboutToShow(IMenuManager mgr) {
+ fillContextMenu(mgr);
+ }
+ });
+
+ // Create menu.
+ Menu menu = menuMgr.createContextMenu(viewer.getControl());
+ viewer.getControl().setMenu(menu);
+
+ // Register menu for extension.
+ getSite().registerContextMenu(menuMgr, viewer);
+ }
+
+ private void fillContextMenu(IMenuManager mgr) {
+ IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
+ Object firstElement = sel.getFirstElement();
+ if(firstElement instanceof IHadoopJob){
+ mgr.add(deleteAction);
+ }else{
+ mgr.add(newLocationAction);
+ mgr.add(editServerAction);
+ mgr.add(deleteAction);
+ }
+ }
+
+ /* @inheritDoc */
+ @Override
+ public void setFocus() {
+
+ }
+
+ /*
+ * IHadoopServerListener implementation
+ */
+
+ /* @inheritDoc */
+ public void serverChanged(AbstractHadoopCluster location, int type) {
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ ClusterView.this.viewer.refresh();
+ }
+ });
+ }
+
+ /*
+ * IStructuredContentProvider implementation
+ */
+
+ /* @inheritDoc */
+ public void inputChanged(final Viewer viewer, Object oldInput, Object newInput) {
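+		// (un)subscribe from the server registry as the view input is swapped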
+ if (oldInput == CONTENT_ROOT)
+ ServerRegistry.getInstance().removeListener(this);
+ if (newInput == CONTENT_ROOT)
+ ServerRegistry.getInstance().addListener(this);
+ }
+
+ /**
+ * The root elements displayed by this view are the existing Hadoop
+ * locations
+ */
+ /* @inheritDoc */
+ public Object[] getElements(Object inputElement) {
+ return ServerRegistry.getInstance().getServers().toArray();
+ }
+
+ /*
+ * ITreeStructuredContentProvider implementation
+ */
+
+ /**
+ * Each location contains a child entry for each job it runs.
+ */
+ /* @inheritDoc */
+ public Object[] getChildren(Object parent) {
+
+ if (parent instanceof AbstractHadoopCluster) {
+ AbstractHadoopCluster location = (AbstractHadoopCluster) parent;
+ location.addJobListener(this);
+ Collection<? extends IHadoopJob> jobs = location.getJobs();
+ return jobs.toArray();
+ }
+
+ return null;
+ }
+
+ /* @inheritDoc */
+ public Object getParent(Object element) {
+ if (element instanceof AbstractHadoopCluster) {
+ return CONTENT_ROOT;
+
+ } else if (element instanceof IHadoopJob) {
+ return ((IHadoopJob) element).getLocation();
+ }
+
+ return null;
+ }
+
+ /* @inheritDoc */
+ public boolean hasChildren(Object element) {
+ /* Only server entries have children */
+ return (element instanceof AbstractHadoopCluster);
+ }
+
+ /*
+ * ITableLabelProvider implementation
+ */
+
+ /* @inheritDoc */
+ public void addListener(ILabelProviderListener listener) {
+ // no listeners handling
+ }
+
+ public boolean isLabelProperty(Object element, String property) {
+ return false;
+ }
+
+ /* @inheritDoc */
+ public void removeListener(ILabelProviderListener listener) {
+ // no listener handling
+ }
+
+ /* @inheritDoc */
+ public Image getColumnImage(Object element, int columnIndex) {
+ if ((columnIndex == 0) && (element instanceof AbstractHadoopCluster)) {
+ return ImageLibrary.getImage("server.view.location.entry");
+
+ } else if ((columnIndex == 0) && (element instanceof IHadoopJob)) {
+ return ImageLibrary.getImage("server.view.job.entry");
+ }
+ return null;
+ }
+
+ /* @inheritDoc */
+ public String getColumnText(Object element, int columnIndex) {
+ if (element instanceof AbstractHadoopCluster) {
+ AbstractHadoopCluster server = (AbstractHadoopCluster) element;
+
+ switch (columnIndex) {
+ case 0:
+ return server.getLocationName();
+ case 1:
+				return server.getMasterHostName();
+ case 2:
+ return server.getState();
+ case 3:
+ return "";
+ }
+ } else if (element instanceof IHadoopJob) {
+ IHadoopJob job = (IHadoopJob) element;
+
+ switch (columnIndex) {
+ case 0:
+ return "" + job.getJobID();
+ case 1:
+ return "";
+ case 2:
+ return job.getState();
+ case 3:
+ return job.getStatus();
+ }
+ } else if (element instanceof JarModule) {
+ JarModule jar = (JarModule) element;
+
+ switch (columnIndex) {
+ case 0:
+ return jar.toString();
+ case 1:
+				return "Publishing jar to server...";
+ case 2:
+ return "";
+ }
+ }
+
+ return null;
+ }
+
+ /*
+ * IJobListener (Map/Reduce Jobs listener) implementation
+ */
+
+ /* @inheritDoc */
+ public void jobAdded(IHadoopJob job) {
+ viewer.refresh();
+ }
+
+ /* @inheritDoc */
+ public void jobRemoved(IHadoopJob job) {
+ viewer.refresh();
+ }
+
+ /* @inheritDoc */
+ public void jobChanged(IHadoopJob job) {
+ viewer.refresh(job);
+ }
+
+ /* @inheritDoc */
+ public void publishDone(IJarModule jar) {
+ viewer.refresh();
+ }
+
+ /* @inheritDoc */
+ public void publishStart(IJarModule jar) {
+ viewer.refresh();
+ }
+
+ /*
+ * Miscellaneous
+ */
+
+ /**
+ * Return the currently selected server (null if there is no selection or if
+ * the selection is not a server)
+ *
+ * @return the currently selected server entry
+ */
+ public AbstractHadoopCluster getSelectedServer() {
+ ITreeSelection selection = (ITreeSelection) viewer.getSelection();
+ Object first = selection.getFirstElement();
+ if (first instanceof AbstractHadoopCluster) {
+ return (AbstractHadoopCluster) first;
+ }
+ return null;
+ }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java
new file mode 100644
index 0000000..416241a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/EditLocationAction.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+/**
+ * Editing server properties action
+ */
+public class EditLocationAction extends Action {
+
+ private ClusterView serverView;
+
+ public EditLocationAction(ClusterView serverView) {
+ this.serverView = serverView;
+
+ setText("Edit Hadoop location...");
+ setImageDescriptor(ImageLibrary.get("server.view.action.location.edit"));
+ }
+
+ @Override
+ public void run() {
+
+ final AbstractHadoopCluster server = serverView.getSelectedServer();
+ if (server == null)
+ return;
+
+ WizardDialog dialog = new WizardDialog(null, new Wizard() {
+ private HadoopLocationWizard page = new HadoopLocationWizard(server);
+
+ @Override
+ public void addPages() {
+ super.addPages();
+ setWindowTitle("Edit Hadoop location...");
+ addPage(page);
+ }
+
+ @Override
+ public boolean performFinish() {
+ page.performFinish();
+ return true;
+ }
+ });
+
+ dialog.create();
+ dialog.setBlockOnOpen(true);
+ dialog.open();
+
+ super.run();
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java
new file mode 100644
index 0000000..14dcb49
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizard.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Driver class (a class that runs a MapReduce job).
+ *
+ */
+
+public class NewDriverWizard extends NewElementWizard implements INewWizard,
+ IRunnableWithProgress {
+ private NewDriverWizardPage page;
+
+	/**
+	 * Creates the new driver type; called with the wizard's progress monitor
+	 * from {@link #finishPage(IProgressMonitor)}.
+	 */
+ public void run(IProgressMonitor monitor) {
+ try {
+ page.createType(monitor);
+ } catch (CoreException e) {
+ e.printStackTrace();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public NewDriverWizard() {
+ setWindowTitle("New MapReduce Driver");
+ }
+
+ @Override
+ public void init(IWorkbench workbench, IStructuredSelection selection) {
+ super.init(workbench, selection);
+
+ page = new NewDriverWizardPage();
+ addPage(page);
+ page.setSelection(selection);
+ }
+
+	/**
+	 * Performs any actions appropriate in response to the user having pressed
+	 * the Finish button, or refuses if finishing now is not permitted.
+	 */
+	@Override
+ public boolean performFinish() {
+ if (super.performFinish()) {
+ if (getCreatedElement() != null) {
+ selectAndReveal(page.getModifiedResource());
+ openResource((IFile) page.getModifiedResource());
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+	/**
+	 * Completes the wizard page by running the type creation.
+	 */
+	@Override
+ protected void finishPage(IProgressMonitor monitor)
+ throws InterruptedException, CoreException {
+ this.run(monitor);
+ }
+
+ @Override
+ public IJavaElement getCreatedElement() {
+ return page.getCreatedType().getPrimaryElement();
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java
new file mode 100644
index 0000000..4857529
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewDriverWizardPage.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.mr;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.FileLocator;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.core.JavaModelException;
+import org.eclipse.jdt.core.search.SearchEngine;
+import org.eclipse.jdt.ui.IJavaElementSearchConstants;
+import org.eclipse.jdt.ui.JavaUI;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.dialogs.ProgressMonitorDialog;
+import org.eclipse.jface.resource.ImageDescriptor;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.window.Window;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.Text;
+import org.eclipse.ui.dialogs.SelectionDialog;
+
+/**
+ * Pre-fills the new MapReduce driver class with a template.
+ *
+ */
+
+public class NewDriverWizardPage extends NewTypeWizardPage {
+ private Button isCreateMapMethod;
+
+ private Text reducerText;
+
+ private Text mapperText;
+
+ private final boolean showContainerSelector;
+
+ public NewDriverWizardPage() {
+ this(true);
+ }
+
+ public NewDriverWizardPage(boolean showContainerSelector) {
+ super(true, "MapReduce Driver");
+
+ this.showContainerSelector = showContainerSelector;
+ setTitle("MapReduce Driver");
+ setDescription("Create a new MapReduce driver");
+ setImageDescriptor(ImageLibrary.get("wizard.driver.new"));
+ }
+
+ public void setSelection(IStructuredSelection selection) {
+ initContainerPage(getInitialJavaElement(selection));
+ initTypePage(getInitialJavaElement(selection));
+ }
+
+	/**
+	 * Creates the new type using the entered field values.
+	 */
+	@Override
+ public void createType(IProgressMonitor monitor) throws CoreException,
+ InterruptedException {
+ super.createType(monitor);
+ }
+
+ @Override
+ protected void createTypeMembers(final IType newType, ImportsManager imports,
+ final IProgressMonitor monitor) throws CoreException {
+ super.createTypeMembers(newType, imports, monitor);
+ imports.addImport("org.apache.hadoop.fs.Path");
+ imports.addImport("org.apache.hadoop.io.Text");
+ imports.addImport("org.apache.hadoop.io.IntWritable");
+ imports.addImport("org.apache.hadoop.mapreduce.Job");
+ imports.addImport("org.apache.hadoop.mapreduce.lib.input.FileInputFormat");
+ imports.addImport("org.apache.hadoop.mapreduce.lib.output.FileOutputFormat");
+
+ /**
+ * TODO(jz) - move most code out of the runnable
+ */
+ getContainer().getShell().getDisplay().syncExec(new Runnable() {
+ public void run() {
+
+ String method = "public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {\n";
+ method += " Job job = new Job();\n\n";
+ method += " job.setJarByClass( ... );\n\n";
+ method += " job.setJobName( \"a nice name\" );\n\n";
+
+ method += " FileInputFormat.setInputPaths(job, new Path(args[0]));\n";
+ method += " FileOutputFormat.setOutputPath(job, new Path(args[1]));\n\n";
+
+ if (mapperText.getText().length() > 0) {
+ method += " job.setMapperClass(" + mapperText.getText()
+ + ".class);\n\n";
+ } else {
+					method += "   // TODO: specify a mapper\n   job.setMapperClass( ... );\n\n";
+ }
+ if (reducerText.getText().length() > 0) {
+ method += " job.setReducerClass(" + reducerText.getText()
+ + ".class);\n\n";
+ } else {
+					method += "   // TODO: specify a reducer\n   job.setReducerClass( ... );\n\n";
+ }
+
+ method += " job.setOutputKeyClass(Text.class);\n";
+ method += " job.setOutputValueClass(IntWritable.class);\n\n";
+
+ method += " boolean success = job.waitForCompletion(true);\n";
+				method += "   System.exit(success ? 0 : 1);\n}";
+
+ try {
+ newType.createMethod(method, null, false, monitor);
+ } catch (JavaModelException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ });
+ }
+
+ public void createControl(Composite parent) {
+ // super.createControl(parent);
+
+ initializeDialogUnits(parent);
+ Composite composite = new Composite(parent, SWT.NONE);
+ GridLayout layout = new GridLayout();
+ layout.numColumns = 4;
+ composite.setLayout(layout);
+
+ createContainerControls(composite, 4);
+
+ createPackageControls(composite, 4);
+ createSeparator(composite, 4);
+ createTypeNameControls(composite, 4);
+
+ createSuperClassControls(composite, 4);
+ createSuperInterfacesControls(composite, 4);
+ createSeparator(composite, 4);
+
+ createMapperControls(composite);
+ createReducerControls(composite);
+
+ if (!showContainerSelector) {
+ setPackageFragmentRoot(null, false);
+ setSuperClass("java.lang.Object", false);
+			setSuperInterfaces(new ArrayList<String>(), false);
+ }
+
+ setControl(composite);
+
+ setFocus();
+ handleFieldChanged(CONTAINER);
+
+ // setSuperClass("org.apache.hadoop.mapred.MapReduceBase", true);
+ // setSuperInterfaces(Arrays.asList(new String[]{
+ // "org.apache.hadoop.mapred.Mapper" }), true);
+ }
+
+ @Override
+ protected void handleFieldChanged(String fieldName) {
+ super.handleFieldChanged(fieldName);
+
+ validate();
+ }
+
+ private void validate() {
+ if (showContainerSelector) {
+ updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
+ fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+ } else {
+			updateStatus(new IStatus[] { fTypeNameStatus });
+ }
+ }
+
+ private void createMapperControls(Composite composite) {
+ this.mapperText = createBrowseClassControl(composite, "Ma&pper:",
+ "&Browse...", "org.apache.hadoop.mapreduce.Mapper", "Mapper Selection");
+ }
+
+ private void createReducerControls(Composite composite) {
+ this.reducerText = createBrowseClassControl(composite, "&Reducer:",
+ "Browse&...", "org.apache.hadoop.mapreduce.Reducer", "Reducer Selection");
+ }
+
+ private Text createBrowseClassControl(final Composite composite,
+ final String string, String browseButtonLabel,
+ final String baseClassName, final String dialogTitle) {
+ Label label = new Label(composite, SWT.NONE);
+ GridData data = new GridData(GridData.FILL_HORIZONTAL);
+ label.setText(string);
+ label.setLayoutData(data);
+
+ final Text text = new Text(composite, SWT.SINGLE | SWT.BORDER);
+ GridData data2 = new GridData(GridData.FILL_HORIZONTAL);
+ data2.horizontalSpan = 2;
+ text.setLayoutData(data2);
+
+ Button browse = new Button(composite, SWT.NONE);
+ browse.setText(browseButtonLabel);
+ GridData data3 = new GridData(GridData.FILL_HORIZONTAL);
+ browse.setLayoutData(data3);
+ browse.addListener(SWT.Selection, new Listener() {
+ public void handleEvent(Event event) {
+ IType baseType;
+ try {
+ baseType = getPackageFragmentRoot().getJavaProject().findType(
+ baseClassName);
+
+ // edit this to limit the scope
+ SelectionDialog dialog = JavaUI.createTypeDialog(
+ composite.getShell(), new ProgressMonitorDialog(composite
+ .getShell()), SearchEngine.createHierarchyScope(baseType),
+ IJavaElementSearchConstants.CONSIDER_CLASSES, false);
+
+ dialog.setMessage("&Choose a type:");
+ dialog.setBlockOnOpen(true);
+ dialog.setTitle(dialogTitle);
+ dialog.open();
+
+ if ((dialog.getReturnCode() == Window.OK)
+ && (dialog.getResult().length > 0)) {
+ IType type = (IType) dialog.getResult()[0];
+ text.setText(type.getFullyQualifiedName());
+ }
+ } catch (JavaModelException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ });
+
+ if (!showContainerSelector) {
+ label.setEnabled(false);
+ text.setEnabled(false);
+ browse.setEnabled(false);
+ }
+
+ return text;
+ }
+}
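Expanded, the string template assembled in createTypeMembers yields a main method like the following (reconstructed here for illustration with both the mapper and reducer fields left empty; the "( ... )" placeholders are emitted verbatim by the template and must be filled in before the generated class compiles):

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Job job = new Job();

        job.setJarByClass( ... );      // placeholder emitted by the template

        job.setJobName("a nice name");

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // TODO: specify a mapper
        job.setMapperClass( ... );

        // TODO: specify a reducer
        job.setReducerClass( ... );

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }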
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java
new file mode 100644
index 0000000..20e269e
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationAction.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+/**
+ * Action corresponding to creating a new MapReduce Server.
+ */
+
+public class NewLocationAction extends Action {
+ public NewLocationAction() {
+ setText("New Hadoop location...");
+ setImageDescriptor(ImageLibrary.get("server.view.action.location.new"));
+ }
+
+ @Override
+ public void run() {
+ WizardDialog dialog = new WizardDialog(null, new Wizard() {
+ private HadoopLocationWizard page = new HadoopLocationWizard();
+
+ @Override
+ public void addPages() {
+ super.addPages();
+ setWindowTitle("New Hadoop location...");
+ addPage(page);
+ }
+
+ @Override
+ public boolean performFinish() {
+ page.performFinish();
+ return true;
+ }
+
+ });
+
+ dialog.create();
+ dialog.setBlockOnOpen(true);
+ dialog.open();
+
+ super.run();
+ }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java
new file mode 100644
index 0000000..ee4e399
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewLocationWizard.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.core.launch.AbstractHadoopCluster;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
+
+public class NewLocationWizard extends Wizard implements INewWizard, IExecutableExtension {
+
+ private HadoopLocationWizard serverLocationWizardPage;
+ private IConfigurationElement configElement;
+
+ /* (non-Javadoc)
+ * @see org.eclipse.ui.IWorkbenchWizard#init(org.eclipse.ui.IWorkbench, org.eclipse.jface.viewers.IStructuredSelection)
+ */
+ @Override
+ public void init(IWorkbench workbench, IStructuredSelection selection) {
+ // TODO Auto-generated method stub
+
+ }
+ @Override
+ public void addPages() {
+ super.addPages();
+ if (serverLocationWizardPage == null) {
+ serverLocationWizardPage = new HadoopLocationWizard();
+ }
+ addPage(serverLocationWizardPage);
+ }
+ /* (non-Javadoc)
+ * @see org.eclipse.core.runtime.IExecutableExtension#setInitializationData(org.eclipse.core.runtime.IConfigurationElement, java.lang.String, java.lang.Object)
+ */
+ @Override
+ public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+ this.configElement=config;
+ }
+
+ /* (non-Javadoc)
+ * @see org.eclipse.jface.wizard.Wizard#performFinish()
+ */
+ @Override
+ public boolean performFinish() {
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ BasicNewProjectResourceWizard.updatePerspective(configElement);
+ }
+ });
+ AbstractHadoopCluster cluster = serverLocationWizardPage.performFinish();
+ return cluster!=null;
+ }
+
+}
\ No newline at end of file
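A sketch of the lifecycle the workbench drives for this wizard; "config" and "selection" are stand-ins for the values Eclipse supplies, not real objects from this patch:

    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.IConfigurationElement;
    import org.eclipse.jface.viewers.IStructuredSelection;
    import org.eclipse.ui.PlatformUI;

    // Hypothetical walkthrough of the calls Eclipse makes on the wizard.
    public class LocationWizardLifecycleExample {
        public static void walkthrough(IConfigurationElement config,
                IStructuredSelection selection) throws CoreException {
            NewLocationWizard wizard = new NewLocationWizard();
            wizard.setInitializationData(config, "class", null); // stashes the config element
            wizard.init(PlatformUI.getWorkbench(), selection);
            wizard.addPages();                   // lazily creates the HadoopLocationWizard page
            boolean ok = wizard.performFinish(); // updates the perspective, then creates the cluster
        }
    }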
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
new file mode 100644
index 0000000..4b88403
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapReduceProjectWizard.java
@@ -0,0 +1,441 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+import org.apache.hdt.core.HadoopVersion;
+import org.apache.hdt.core.natures.MapReduceNature;
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.preferences.MapReducePreferencePage;
+import org.apache.hdt.ui.preferences.PreferenceConstants;
+import org.eclipse.core.resources.IProject;
+import org.eclipse.core.resources.IProjectDescription;
+import org.eclipse.core.resources.ResourcesPlugin;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.NullProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.core.runtime.QualifiedName;
+import org.eclipse.core.runtime.SubProgressMonitor;
+import org.eclipse.jdt.ui.wizards.NewJavaProjectWizardPage;
+import org.eclipse.jface.dialogs.IDialogConstants;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.preference.PreferenceDialog;
+import org.eclipse.jface.preference.PreferenceManager;
+import org.eclipse.jface.preference.PreferenceNode;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.wizard.IWizardPage;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Combo;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.DirectoryDialog;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Link;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.Text;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.PlatformUI;
+import org.eclipse.ui.dialogs.WizardNewProjectCreationPage;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
+
+/**
+ * Wizard for creating a new MapReduce Project
+ *
+ */
+
+public class NewMapReduceProjectWizard extends Wizard implements INewWizard, IExecutableExtension {
+ static Logger log = Logger.getLogger(NewMapReduceProjectWizard.class.getName());
+
+ private HadoopFirstPage firstPage;
+
+ private NewJavaProjectWizardPage javaPage;
+
+ public NewDriverWizardPage newDriverPage;
+
+ private IConfigurationElement config;
+
+ public NewMapReduceProjectWizard() {
+ setWindowTitle("New MapReduce Project Wizard");
+ }
+
+ public void init(IWorkbench workbench, IStructuredSelection selection) {
+
+ }
+
+ @Override
+ public boolean canFinish() {
+ return firstPage.isPageComplete() && javaPage.isPageComplete()
+ // && ((!firstPage.generateDriver.getSelection())
+ // || newDriverPage.isPageComplete()
+ ;
+ }
+
+ @Override
+ public IWizardPage getNextPage(IWizardPage page) {
+ // if (page == firstPage
+ // && firstPage.generateDriver.getSelection()
+ // )
+ // {
+ // return newDriverPage; // if "generate mapper" checked, second page is
+ // new driver page
+ // }
+ // else
+ // {
+		IWizardPage answer = super.getNextPage(page);
+		if (answer == newDriverPage) {
+			return null; // don't flip to the new driver page unless
+							// "generate driver" is checked
+		}
+		return answer;
+ // }
+ }
+
+ @Override
+ public IWizardPage getPreviousPage(IWizardPage page) {
+ if (page == newDriverPage) {
+ return firstPage; // newDriverPage, if it appears, is the second
+ // page
+ } else {
+ return super.getPreviousPage(page);
+ }
+ }
+
+ static class HadoopFirstPage extends WizardNewProjectCreationPage implements SelectionListener {
+ public HadoopFirstPage() throws CoreException {
+ super("New Hadoop Project");
+ setImageDescriptor(ImageLibrary.get("wizard.mapreduce.project.new"));
+ String prefVersion = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_VERSION);
+ prefVersion = prefVersion != null && !prefVersion.isEmpty() ? prefVersion :
+ HadoopVersion.Version1.getDisplayName();
+ homeReader = AbstractHadoopHomeReader.createReader(prefVersion);
+ }
+
+ private Link openPreferences;
+
+ private Button workspaceHadoop;
+
+ private Button projectHadoop;
+
+ private Text location;
+
+ private Button browse;
+
+ private String path;
+
+ public String currentPath;
+
+ AbstractHadoopHomeReader homeReader;
+
+ private Combo hadoopVersion;
+
+ private String hadoopVersionText;
+
+ // private Button generateDriver;
+
+ @Override
+ public void createControl(Composite parent) {
+ super.createControl(parent);
+
+ setTitle("MapReduce Project");
+ setDescription("Create a MapReduce project.");
+
+ Group group = new Group((Composite) getControl(), SWT.NONE);
+ group.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+ group.setText("Hadoop MapReduce Library Installation Path");
+ GridLayout layout = new GridLayout(3, true);
+ layout.marginLeft = convertHorizontalDLUsToPixels(IDialogConstants.HORIZONTAL_MARGIN);
+ layout.marginRight = convertHorizontalDLUsToPixels(IDialogConstants.HORIZONTAL_MARGIN);
+ layout.marginTop = convertHorizontalDLUsToPixels(IDialogConstants.VERTICAL_MARGIN);
+ layout.marginBottom = convertHorizontalDLUsToPixels(IDialogConstants.VERTICAL_MARGIN);
+ group.setLayout(layout);
+
+ workspaceHadoop = new Button(group, SWT.RADIO);
+ GridData d = new GridData(GridData.BEGINNING, GridData.BEGINNING, false, false);
+ d.horizontalSpan = 2;
+ workspaceHadoop.setLayoutData(d);
+ // workspaceHadoop.setText("Use default workbench Hadoop library
+ // location");
+ workspaceHadoop.setSelection(true);
+
+ updateHadoopDirLabelFromPreferences();
+
+ openPreferences = new Link(group, SWT.NONE);
+ openPreferences.setText("<a>Configure Hadoop install directory...</a>");
+ openPreferences.setLayoutData(new GridData(GridData.END, GridData.CENTER, false, false));
+ openPreferences.addSelectionListener(this);
+
+ projectHadoop = new Button(group, SWT.RADIO);
+ projectHadoop.setLayoutData(new GridData(GridData.BEGINNING, GridData.CENTER, false, false));
+ projectHadoop.setText("Specify Hadoop library location");
+
+ location = new Text(group, SWT.SINGLE | SWT.BORDER);
+ location.setText("");
+ d = new GridData(GridData.END, GridData.CENTER, true, false);
+ d.horizontalSpan = 1;
+ d.widthHint = 250;
+ d.grabExcessHorizontalSpace = true;
+ location.setLayoutData(d);
+ location.setEnabled(false);
+
+ browse = new Button(group, SWT.NONE);
+ browse.setText("Browse...");
+ browse.setLayoutData(new GridData(GridData.BEGINNING, GridData.CENTER, false, false));
+ browse.setEnabled(false);
+ browse.addSelectionListener(this);
+
+ /*
+			 * Hadoop version
+ */
+ {
+ Label label = new Label(group, SWT.NONE);
+ label.setText("&Hadoop Version:");
+ Combo options = new Combo(group, SWT.SINGLE | SWT.BORDER | SWT.READ_ONLY);
+ options.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
+ for (HadoopVersion ver : HadoopVersion.values()) {
+ options.add(ver.getDisplayName());
+ }
+ options.addListener(SWT.Selection, new Listener() {
+ public void handleEvent(Event e) {
+ try {
+ if (!hadoopVersionText.equalsIgnoreCase(hadoopVersion.getText())) {
+ homeReader = AbstractHadoopHomeReader.createReader(hadoopVersion.getText());
+ hadoopVersionText = hadoopVersion.getText();
+ getContainer().updateButtons();
+ }
+ } catch (CoreException e1) {
+ e1.printStackTrace();
+ }
+ }
+
+ });
+
+ hadoopVersion = options;
+ if (hadoopVersionText == null || hadoopVersionText.isEmpty())
+ hadoopVersionText = HadoopVersion.Version1.getDisplayName();
+
+ int pos = 0;
+ for (String item : options.getItems()) {
+ if (item.equalsIgnoreCase(hadoopVersionText)) {
+ options.select(pos);
+ break;
+ }
+ pos++;
+ }
+ options.setEnabled(false);
+ }
+
+ projectHadoop.addSelectionListener(this);
+ workspaceHadoop.addSelectionListener(this);
+
+ // generateDriver = new Button((Composite) getControl(), SWT.CHECK);
+ // generateDriver.setText("Generate a MapReduce driver");
+ // generateDriver.addListener(SWT.Selection, new Listener()
+ // {
+ // public void handleEvent(Event event) {
+ // getContainer().updateButtons(); }
+ // });
+ }
+
+ @Override
+ public boolean isPageComplete() {
+ boolean validHadoop = validateHadoopLocation();
+
+ if (!validHadoop && isCurrentPage()) {
+				setErrorMessage("Invalid Hadoop runtime specified; please click 'Configure Hadoop install directory' or fill in the library location field");
+ } else {
+ setErrorMessage(null);
+ }
+
+ return super.isPageComplete() && validHadoop;
+ }
+
+ private boolean validateHadoopLocation() {
+ if (workspaceHadoop.getSelection()) {
+ this.currentPath = path;
+ return homeReader.validateHadoopHome(new Path(path).toFile());
+ } else {
+ this.currentPath = location.getText();
+ return homeReader.validateHadoopHome(new Path(location.getText()).toFile());
+ }
+ }
+
+ private void updateHadoopDirLabelFromPreferences() {
+ path = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_PATH);
+ hadoopVersionText = Activator.getDefault().getPreferenceStore().getString(PreferenceConstants.P_VERSION);
+
+ if ((path != null) && (path.length() > 0)) {
+ workspaceHadoop.setText("Use default Hadoop");
+ } else {
+ workspaceHadoop.setText("Use default Hadoop (currently not set)");
+ }
+ }
+
+ public void widgetDefaultSelected(SelectionEvent e) {
+ }
+
+ public void widgetSelected(SelectionEvent e) {
+ if (e.getSource() == openPreferences) {
+ PreferenceManager manager = new PreferenceManager();
+ manager.addToRoot(new PreferenceNode("Hadoop Installation Directory", new MapReducePreferencePage()));
+ PreferenceDialog dialog = new PreferenceDialog(this.getShell(), manager);
+ dialog.create();
+ dialog.setMessage("Select Hadoop Installation Directory");
+ dialog.setBlockOnOpen(true);
+ dialog.open();
+
+ updateHadoopDirLabelFromPreferences();
+ } else if (e.getSource() == browse) {
+ DirectoryDialog dialog = new DirectoryDialog(this.getShell());
+				dialog.setMessage("Select a Hadoop installation directory containing hadoop-X-core.jar");
+ dialog.setText("Select Hadoop Installation Directory");
+ String directory = dialog.open();
+
+ if (directory != null) {
+ location.setText(directory);
+
+ if (!validateHadoopLocation()) {
+ setErrorMessage("No Hadoop jar found in specified directory");
+ } else {
+ setErrorMessage(null);
+ }
+ }
+ } else if (projectHadoop.getSelection()) {
+ location.setEnabled(true);
+ browse.setEnabled(true);
+ hadoopVersion.setEnabled(true);
+ } else {
+ location.setEnabled(false);
+ browse.setEnabled(false);
+ hadoopVersion.setEnabled(false);
+ }
+
+ getContainer().updateButtons();
+ }
+ }
+
+ @Override
+ public void addPages() {
+ /*
+ * firstPage = new HadoopFirstPage(); addPage(firstPage ); addPage( new
+ * JavaProjectWizardSecondPage(firstPage) );
+ */
+
+ try {
+ firstPage = new HadoopFirstPage();
+ } catch (CoreException e) {
+ e.printStackTrace();
+ }
+ javaPage = new NewJavaProjectWizardPage(ResourcesPlugin.getWorkspace().getRoot(), firstPage);
+ // newDriverPage = new NewDriverWizardPage(false);
+ // newDriverPage.setPageComplete(false); // ensure finish button
+ // initially disabled
+ addPage(firstPage);
+ addPage(javaPage);
+
+ // addPage(newDriverPage);
+ }
+
+ @Override
+ public boolean performFinish() {
+ try {
+ PlatformUI.getWorkbench().getProgressService().runInUI(this.getContainer(), new IRunnableWithProgress() {
+ public void run(IProgressMonitor monitor) {
+ try {
+ monitor.beginTask("Create Hadoop Project", 300);
+
+ javaPage.getRunnable().run(new SubProgressMonitor(monitor, 100));
+
+ // if( firstPage.generateDriver.getSelection())
+ // {
+ // newDriverPage.setPackageFragmentRoot(javaPage.getNewJavaProject().getAllPackageFragmentRoots()[0],
+ // false);
+ // newDriverPage.getRunnable().run(new
+ // SubProgressMonitor(monitor,100));
+ // }
+
+ IProject project = javaPage.getNewJavaProject().getResource().getProject();
+ IProjectDescription description = project.getDescription();
+ String[] existingNatures = description.getNatureIds();
+ String[] natures = new String[existingNatures.length + 1];
+ for (int i = 0; i < existingNatures.length; i++) {
+ natures[i + 1] = existingNatures[i];
+ }
+
+ natures[0] = MapReduceNature.ID;
+ description.setNatureIds(natures);
+
+ project.setPersistentProperty(new QualifiedName(Activator.PLUGIN_ID, "hadoop.runtime.path"), firstPage.currentPath);
+ project.setPersistentProperty(new QualifiedName(Activator.PLUGIN_ID, "hadoop.version"), firstPage.hadoopVersionText);
+ project.setDescription(description, new NullProgressMonitor());
+
+ String[] natureIds = project.getDescription().getNatureIds();
+ for (int i = 0; i < natureIds.length; i++) {
+ log.fine("Nature id # " + i + " > " + natureIds[i]);
+ }
+
+ monitor.worked(100);
+ monitor.done();
+
+ BasicNewProjectResourceWizard.updatePerspective(config);
+ } catch (CoreException e) {
+ // TODO Auto-generated catch block
+ log.log(Level.SEVERE, "CoreException thrown.", e);
+ } catch (InvocationTargetException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }, null);
+ } catch (InvocationTargetException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+
+ return true;
+ }
+
+ public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+ this.config = config;
+ }
+}
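The two persistent properties stored in performFinish can be read back from the project later; a minimal sketch, assuming the caller can see Activator.PLUGIN_ID (the keys are exactly the ones used above):

    import org.eclipse.core.resources.IProject;
    import org.eclipse.core.runtime.CoreException;
    import org.eclipse.core.runtime.QualifiedName;

    // Minimal sketch of reading back the properties this wizard stores.
    public final class HadoopProjectProperties {
        public static String runtimePath(IProject project) throws CoreException {
            return project.getPersistentProperty(
                    new QualifiedName(Activator.PLUGIN_ID, "hadoop.runtime.path"));
        }

        public static String hadoopVersion(IProject project) throws CoreException {
            return project.getPersistentProperty(
                    new QualifiedName(Activator.PLUGIN_ID, "hadoop.version"));
        }
    }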
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java
new file mode 100644
index 0000000..b15bfda
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewMapperWizard.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Mapper class (a class that runs the Map portion of
+ * a MapReduce job). The class is pre-filled with a template.
+ *
+ */
+
+public class NewMapperWizard extends NewElementWizard implements INewWizard, IRunnableWithProgress {
+ private Page page;
+
+ public NewMapperWizard() {
+ setWindowTitle("New Mapper");
+ }
+
+ public void run(IProgressMonitor monitor) {
+ try {
+ page.createType(monitor);
+ } catch (CoreException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void init(IWorkbench workbench, IStructuredSelection selection) {
+ super.init(workbench, selection);
+
+ page = new Page();
+ addPage(page);
+ page.setSelection(selection);
+ }
+
+ public static class Page extends NewTypeWizardPage {
+ private Button isCreateMapMethod;
+
+ public Page() {
+ super(true, "Mapper");
+
+ setTitle("Mapper");
+ setDescription("Create a new Mapper implementation.");
+ setImageDescriptor(ImageLibrary.get("wizard.mapper.new"));
+ }
+
+ public void setSelection(IStructuredSelection selection) {
+ initContainerPage(getInitialJavaElement(selection));
+ initTypePage(getInitialJavaElement(selection));
+ }
+
+ @Override
+ public void createType(IProgressMonitor monitor) throws CoreException, InterruptedException {
+ super.createType(monitor);
+ }
+
+ @Override
+ protected void createTypeMembers(IType newType, ImportsManager imports, IProgressMonitor monitor) throws CoreException {
+ super.createTypeMembers(newType, imports, monitor);
+ imports.addImport("java.io.IOException");
+ imports.addImport("org.apache.hadoop.io.Text");
+ imports.addImport("org.apache.hadoop.io.IntWritable");
+ imports.addImport("org.apache.hadoop.io.LongWritable");
+ imports.addImport("org.apache.hadoop.mapreduce.Mapper");
+ newType.createMethod("public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException \n{\n}\n", null,
+ false, monitor);
+ }
+
+ public void createControl(Composite parent) {
+ // super.createControl(parent);
+
+ initializeDialogUnits(parent);
+ Composite composite = new Composite(parent, SWT.NONE);
+ GridLayout layout = new GridLayout();
+ layout.numColumns = 4;
+ composite.setLayout(layout);
+
+ createContainerControls(composite, 4);
+ createPackageControls(composite, 4);
+ createSeparator(composite, 4);
+ createTypeNameControls(composite, 4);
+ createSuperClassControls(composite, 4);
+ createSuperInterfacesControls(composite, 4);
+ // createSeparator(composite, 4);
+
+ setControl(composite);
+
+ setSuperClass("org.apache.hadoop.mapreduce.Mapper<LongWritable, Text, Text, IntWritable>", true);
+
+ setFocus();
+ validate();
+ }
+
+ @Override
+ protected void handleFieldChanged(String fieldName) {
+ super.handleFieldChanged(fieldName);
+
+ validate();
+ }
+
+ private void validate() {
+ updateStatus(new IStatus[] { fContainerStatus, fPackageStatus, fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+ }
+ }
+
+ @Override
+ public boolean performFinish() {
+ if (super.performFinish()) {
+ if (getCreatedElement() != null) {
+ openResource((IFile) page.getModifiedResource());
+ selectAndReveal(page.getModifiedResource());
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ protected void finishPage(IProgressMonitor monitor) throws InterruptedException, CoreException {
+ this.run(monitor);
+ }
+
+ @Override
+ public IJavaElement getCreatedElement() {
+ return page.getCreatedType().getPrimaryElement();
+ }
+
+}
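Put together, the wizard emits a skeleton equivalent to the following (class and package names are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // Reconstruction of the generated Mapper skeleton; the user fills in map().
    public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
        }
    }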
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java
new file mode 100644
index 0000000..c09e142
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewPartitionerWizard.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import java.util.ArrayList;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Partitioner class (a class that decides which
+ * reduce task each intermediate key/value pair is sent to). The class is
+ * pre-filled with a template.
+ *
+ */
+
+public class NewPartitionerWizard extends NewElementWizard implements INewWizard,
+ IRunnableWithProgress {
+ private Page page;
+
+ public NewPartitionerWizard() {
+ setWindowTitle("New Partitioner");
+ }
+
+ public void run(IProgressMonitor monitor) {
+ try {
+ page.createType(monitor);
+ } catch (CoreException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void init(IWorkbench workbench, IStructuredSelection selection) {
+ super.init(workbench, selection);
+
+ page = new Page();
+ addPage(page);
+ page.setSelection(selection);
+ }
+
+ public static class Page extends NewTypeWizardPage {
+ private Button isCreateMapMethod;
+
+ public Page() {
+ super(true, "Partitioner");
+
+ setTitle("Partitioner");
+ setDescription("Create a new Partitioner implementation.");
+ setImageDescriptor(ImageLibrary.get("wizard.partitioner.new"));
+ }
+
+ public void setSelection(IStructuredSelection selection) {
+ initContainerPage(getInitialJavaElement(selection));
+ initTypePage(getInitialJavaElement(selection));
+ }
+
+ @Override
+ public void createType(IProgressMonitor monitor) throws CoreException,
+ InterruptedException {
+ super.createType(monitor);
+ }
+
+ @Override
+ protected void createTypeMembers(IType newType, ImportsManager imports,
+ IProgressMonitor monitor) throws CoreException {
+ super.createTypeMembers(newType, imports, monitor);
+ imports.addImport("java.util.HashMap");
+ imports.addImport("org.apache.hadoop.io.Text");
+ imports.addImport("org.apache.hadoop.conf.Configurable");
+ imports.addImport("org.apache.hadoop.conf.Configuration");
+ imports.addImport("org.apache.hadoop.mapreduce.Partitioner");
+
+
+ newType
+ .createMethod(
+ " @Override\n" +
+ " public Configuration getConf() { \n" +
+ " // TODO Auto-generated method stub \n" +
+ " return null;\n" +
+ " }\n\n" +
+ " @Override\n" +
+ " public void setConf(Configuration conf) {\n" +
+ " // TODO Auto-generated method stub\n" +
+ " }\n\n" +
+ " @Override\n" +
+ " public int getPartition(Text key, Text value, int nr) { \n" +
+ " // TODO Auto-generated method stub \n" +
+ " return 0; \n" +
+ " }\n", null, false,
+ monitor);
+ }
+
+ public void createControl(Composite parent) {
+ // super.createControl(parent);
+
+ initializeDialogUnits(parent);
+ Composite composite = new Composite(parent, SWT.NONE);
+ GridLayout layout = new GridLayout();
+ layout.numColumns = 4;
+ composite.setLayout(layout);
+
+ createContainerControls(composite, 4);
+ createPackageControls(composite, 4);
+ createSeparator(composite, 4);
+ createTypeNameControls(composite, 4);
+ createSuperClassControls(composite, 4);
+ createSuperInterfacesControls(composite, 4);
+ // createSeparator(composite, 4);
+
+ setControl(composite);
+
+ setSuperClass("org.apache.hadoop.mapreduce.Partitioner<Text, Text>", true);
+			ArrayList<String> al = new ArrayList<String>();
+			al.add("org.apache.hadoop.conf.Configurable");
+			setSuperInterfaces(al, true);
+
+ setFocus();
+ validate();
+ }
+
+ @Override
+ protected void handleFieldChanged(String fieldName) {
+ super.handleFieldChanged(fieldName);
+
+ validate();
+ }
+
+ private void validate() {
+ updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
+ fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+ }
+ }
+
+ @Override
+ public boolean performFinish() {
+ if (super.performFinish()) {
+ if (getCreatedElement() != null) {
+ openResource((IFile) page.getModifiedResource());
+ selectAndReveal(page.getModifiedResource());
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ protected void finishPage(IProgressMonitor monitor)
+ throws InterruptedException, CoreException {
+ this.run(monitor);
+ }
+
+ @Override
+ public IJavaElement getCreatedElement() {
+ return page.getCreatedType().getPrimaryElement();
+ }
+
+}
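For reference, the skeleton this wizard emits is equivalent to the following (class name illustrative; java.util.HashMap is imported by the template although the skeleton does not use it):

    import org.apache.hadoop.conf.Configurable;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Partitioner;

    // Reconstruction of the generated Partitioner skeleton.
    public class MyPartitioner extends Partitioner<Text, Text> implements Configurable {

        @Override
        public Configuration getConf() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public void setConf(Configuration conf) {
            // TODO Auto-generated method stub
        }

        @Override
        public int getPartition(Text key, Text value, int nr) {
            // TODO Auto-generated method stub
            return 0;
        }
    }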
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java
new file mode 100644
index 0000000..da514e4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/mr/NewReducerWizard.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.internal.mr;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.eclipse.core.resources.IFile;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.internal.ui.wizards.NewElementWizard;
+import org.eclipse.jdt.ui.wizards.NewTypeWizardPage;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.ui.INewWizard;
+import org.eclipse.ui.IWorkbench;
+
+/**
+ * Wizard for creating a new Reducer class (a class that runs the Reduce
+ * portion of a MapReduce job). The class is pre-filled with a template.
+ *
+ */
+
+public class NewReducerWizard extends NewElementWizard implements
+ INewWizard, IRunnableWithProgress {
+ private Page page;
+
+ public NewReducerWizard() {
+ setWindowTitle("New Reducer");
+ }
+
+ public void run(IProgressMonitor monitor) {
+ try {
+ page.createType(monitor);
+ } catch (CoreException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ } catch (InterruptedException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void init(IWorkbench workbench, IStructuredSelection selection) {
+ super.init(workbench, selection);
+
+ page = new Page();
+ addPage(page);
+ page.setSelection(selection);
+ }
+
+ public static class Page extends NewTypeWizardPage {
+ public Page() {
+ super(true, "Reducer");
+
+ setTitle("Reducer");
+ setDescription("Create a new Reducer implementation.");
+ setImageDescriptor(ImageLibrary.get("wizard.reducer.new"));
+ }
+
+ public void setSelection(IStructuredSelection selection) {
+ initContainerPage(getInitialJavaElement(selection));
+ initTypePage(getInitialJavaElement(selection));
+ }
+
+ @Override
+ public void createType(IProgressMonitor monitor) throws CoreException,
+ InterruptedException {
+ super.createType(monitor);
+ }
+
+ @Override
+ protected void createTypeMembers(IType newType, ImportsManager imports,
+ IProgressMonitor monitor) throws CoreException {
+ super.createTypeMembers(newType, imports, monitor);
+ imports.addImport("java.io.IOException");
+ imports.addImport("org.apache.hadoop.mapreduce.Reducer");
+ imports.addImport("org.apache.hadoop.io.Text");
+ imports.addImport("org.apache.hadoop.io.IntWritable");
+			newType.createMethod(
+					"public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException \n{\n"
+							// iterate values directly; calling values.iterator() in the
+							// loop condition would create a fresh iterator on every pass
+							+ "\tfor (IntWritable value : values) {\n"
+							+ "\t\t// process value\n"
+							+ "\t}\n" + "}\n", null, false,
+					monitor);
+ }
+
+ public void createControl(Composite parent) {
+ // super.createControl(parent);
+
+ initializeDialogUnits(parent);
+ Composite composite = new Composite(parent, SWT.NONE);
+ GridLayout layout = new GridLayout();
+ layout.numColumns = 4;
+ composite.setLayout(layout);
+
+ createContainerControls(composite, 4);
+ createPackageControls(composite, 4);
+ createSeparator(composite, 4);
+ createTypeNameControls(composite, 4);
+ createSuperClassControls(composite, 4);
+ createSuperInterfacesControls(composite, 4);
+ // createSeparator(composite, 4);
+
+ setControl(composite);
+
+ setSuperClass("org.apache.hadoop.mapreduce.Reducer<Text, IntWritable, Text, IntWritable>", true);
+
+ setFocus();
+ validate();
+ }
+
+ @Override
+ protected void handleFieldChanged(String fieldName) {
+ super.handleFieldChanged(fieldName);
+
+ validate();
+ }
+
+ private void validate() {
+ updateStatus(new IStatus[] { fContainerStatus, fPackageStatus,
+ fTypeNameStatus, fSuperClassStatus, fSuperInterfacesStatus });
+ }
+ }
+
+ @Override
+ public boolean performFinish() {
+ if (super.performFinish()) {
+ if (getCreatedElement() != null) {
+ selectAndReveal(page.getModifiedResource());
+ openResource((IFile) page.getModifiedResource());
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ protected void finishPage(IProgressMonitor monitor)
+ throws InterruptedException, CoreException {
+ this.run(monitor);
+ }
+
+ @Override
+ public IJavaElement getCreatedElement() {
+ return (page.getCreatedType() == null) ? null : page.getCreatedType()
+ .getPrimaryElement();
+ }
+}
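With the corrected loop above, the emitted skeleton is equivalent to the following (class name illustrative):

    import java.io.IOException;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    // Reconstruction of the generated Reducer skeleton.
    public class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            for (IntWritable value : values) {
                // process value
            }
        }
    }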
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
index 599c011..0147b6b 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DeleteAction.java
@@ -18,7 +18,6 @@
*/
package org.apache.hdt.ui.internal.zookeeper;
-import java.io.IOException;
import java.util.Iterator;
import org.apache.hdt.core.internal.model.ZNode;
@@ -27,9 +26,12 @@
import org.apache.hdt.core.zookeeper.ZooKeeperClient;
import org.apache.log4j.Logger;
import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IStatus;
import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.viewers.ISelection;
import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.IObjectActionDelegate;
import org.eclipse.ui.IWorkbenchPart;
import org.eclipse.ui.navigator.resources.ProjectExplorer;
@@ -39,7 +41,12 @@
private final static Logger logger = Logger.getLogger(DeleteAction.class);
private ISelection selection;
private IWorkbenchPart targetPart;
-
+
+
+ private void showError(String message) {
+ MessageDialog.openError(Display.getDefault().getActiveShell(),
+ "ZooKeeper Delete Error", message);
+ }
/*
* (non-Javadoc)
*
@@ -47,48 +54,54 @@
*/
@Override
public void run(IAction action) {
- if (this.selection != null && !this.selection.isEmpty()) {
- IStructuredSelection sSelection = (IStructuredSelection) this.selection;
- @SuppressWarnings("rawtypes")
- Iterator itr = sSelection.iterator();
- while (itr.hasNext()) {
- Object object = itr.next();
- if (object instanceof ZooKeeperServer) {
- ZooKeeperServer r = (ZooKeeperServer) object;
- if (logger.isDebugEnabled())
- logger.debug("Deleting: " + r);
- try {
- ZooKeeperManager.INSTANCE.disconnect(r);
- } finally {
+ Display.getDefault().syncExec(new Runnable() {
+ @Override
+ public void run() {
+ if (selection != null && !selection.isEmpty()) {
+ IStructuredSelection sSelection = (IStructuredSelection) selection;
+ @SuppressWarnings("rawtypes")
+ Iterator itr = sSelection.iterator();
+ while (itr.hasNext()) {
+ Object object = itr.next();
+ if (object instanceof ZooKeeperServer) {
+ ZooKeeperServer r = (ZooKeeperServer) object;
+ if (logger.isDebugEnabled())
+ logger.debug("Deleting: " + r);
try {
- ZooKeeperManager.INSTANCE.delete(r);
+ ZooKeeperManager.INSTANCE.disconnect(r);
} catch (CoreException e) {
- logger.error(e.getMessage());
+								logger.error("Error disconnecting ZooKeeper server", e);
+ } finally {
+ try {
+ ZooKeeperManager.INSTANCE.delete(r);
+ } catch (CoreException e) {
+									logger.error("Error deleting ZooKeeper server", e);
+ IStatus status = e.getStatus();
+ showError(status.getException().getMessage());
+ }
+ }
+ if (logger.isDebugEnabled())
+ logger.debug("Deleted: " + r);
+ if (targetPart instanceof ProjectExplorer) {
+ ProjectExplorer pe = (ProjectExplorer) targetPart;
+ pe.getCommonViewer().refresh();
+ }
+ } else if (object instanceof ZNode) {
+ ZNode zkn = (ZNode) object;
+ if (logger.isDebugEnabled())
+ logger.debug("Deleting: " + zkn);
+ try {
+ ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
+ client.delete(zkn);
+ } catch (Exception e) {
+							logger.error("Error deleting znode", e);
+ showError(e.getMessage());
}
}
- if (logger.isDebugEnabled())
- logger.debug("Deleted: " + r);
- if (targetPart instanceof ProjectExplorer) {
- ProjectExplorer pe = (ProjectExplorer) targetPart;
- pe.getCommonViewer().refresh();
- }
- } else if (object instanceof ZNode) {
- ZNode zkn = (ZNode) object;
- if (logger.isDebugEnabled())
- logger.debug("Deleting: " + zkn);
- try {
- ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
- client.delete(zkn);
- } catch (CoreException e) {
- logger.error(e.getMessage(), e);
- } catch (IOException e) {
- logger.error(e.getMessage(), e);
- } catch (InterruptedException e) {
- logger.error(e.getMessage(), e);
- }
}
- }
- }
+				}
+			}
+ });
+
}
/*
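The common thread in these action changes is wrapping the work in Display.syncExec: the actions now open dialogs and refresh viewers, and SWT permits widget access only from the UI thread. A minimal sketch of the pattern, independent of this patch:

    import org.eclipse.swt.widgets.Display;

    // Minimal sketch: any code that touches SWT widgets must run on the display thread.
    public class UiThreadExample {
        public static void runOnUiThread(final Runnable widgetWork) {
            Display.getDefault().syncExec(new Runnable() {
                public void run() {
                    widgetWork.run(); // safe: executes on the SWT UI thread
                }
            });
        }
    }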
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
index d335c79..af293c5 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/DisconnectAction.java
@@ -24,9 +24,12 @@
import org.apache.hdt.core.internal.model.ZooKeeperServer;
import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.viewers.ISelection;
import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.IObjectActionDelegate;
import org.eclipse.ui.IWorkbenchPart;
import org.eclipse.ui.navigator.resources.ProjectExplorer;
@@ -37,6 +40,10 @@
private ISelection selection;
private IWorkbenchPart targetPart;
+ private void showError(String message) {
+ MessageDialog.openError(Display.getDefault().getActiveShell(),
+				"ZooKeeper Disconnect Error", message);
+ }
/*
* (non-Javadoc)
*
@@ -44,26 +51,33 @@
*/
@Override
public void run(IAction action) {
- if (this.selection != null && !this.selection.isEmpty()) {
- IStructuredSelection sSelection = (IStructuredSelection) this.selection;
- @SuppressWarnings("rawtypes")
- Iterator itr = sSelection.iterator();
- while (itr.hasNext()) {
- Object object = itr.next();
- if (object instanceof ZooKeeperServer) {
- ZooKeeperServer r = (ZooKeeperServer) object;
- if(logger.isDebugEnabled())
- logger.debug("Disconnecting: "+r);
- ZooKeeperManager.INSTANCE.disconnect(r);
- if(logger.isDebugEnabled())
- logger.debug("Disconnected: "+r);
- if (targetPart instanceof ProjectExplorer) {
- ProjectExplorer pe = (ProjectExplorer) targetPart;
- pe.getCommonViewer().refresh(r, true);
+ Display.getDefault().syncExec(new Runnable() {
+ @Override
+ public void run() {
+ if (selection != null && !selection.isEmpty()) {
+ IStructuredSelection sSelection = (IStructuredSelection) selection;
+ @SuppressWarnings("rawtypes")
+ Iterator itr = sSelection.iterator();
+ while (itr.hasNext()) {
+ Object object = itr.next();
+ if (object instanceof ZooKeeperServer) {
+ ZooKeeperServer r = (ZooKeeperServer) object;
+ if(logger.isDebugEnabled())
+ logger.debug("Disconnecting: "+r);
+ try {
+ ZooKeeperManager.INSTANCE.disconnect(r);
+ } catch (CoreException e) {
+							logger.error("Error disconnecting ZooKeeper server", e);
+ showError(e.getStatus().getException().getMessage());
+ }
+ if(logger.isDebugEnabled())
+ logger.debug("Disconnected: "+r);
+ if (targetPart instanceof ProjectExplorer) {
+ ProjectExplorer pe = (ProjectExplorer) targetPart;
+ pe.getCommonViewer().refresh(r, true);
+ }
}
- }
- }
- }
+					}
+				}
+			}
+		});
}
/*
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
index 405773a..9a8e7c0 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/NewZooKeeperWizard.java
@@ -19,18 +19,28 @@
import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.internal.launch.HadoopLocationWizard;
+import org.apache.hdt.ui.internal.launch.ServerRegistry;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IConfigurationElement;
+import org.eclipse.core.runtime.IExecutableExtension;
+import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.Status;
import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.preference.IPreferenceStore;
import org.eclipse.jface.viewers.IStructuredSelection;
import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.INewWizard;
import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.wizards.newresource.BasicNewProjectResourceWizard;
-public class NewZooKeeperWizard extends Wizard implements INewWizard {
+public class NewZooKeeperWizard extends Wizard implements INewWizard, IExecutableExtension {
//private static Logger logger = Logger.getLogger(NewZooKeeperWizard.class);
private NewZooKeeperServerWizardPage serverLocationWizardPage = null;
+ private IConfigurationElement configElement;
public NewZooKeeperWizard() {
}
@@ -55,6 +65,11 @@
@Override
public boolean performFinish() {
+ Display.getDefault().syncExec(new Runnable() {
+ public void run() {
+ BasicNewProjectResourceWizard.updatePerspective(configElement);
+ }
+ });
if (serverLocationWizardPage != null) {
String ambariUrl = serverLocationWizardPage.getZkServerLocation();
if (ambariUrl != null) {
@@ -67,7 +82,15 @@
Job j = new Job("Creating ZooKeeper project [" + serverLocationWizardPage.getZkServerName() + "]") {
protected org.eclipse.core.runtime.IStatus run(org.eclipse.core.runtime.IProgressMonitor monitor) {
- ZooKeeperManager.INSTANCE.createServer(serverLocationWizardPage.getZkServerName(), serverLocationWizardPage.getZkServerLocation());
+ try {
+ ZooKeeperManager.INSTANCE.createServer(serverLocationWizardPage.getZkServerName(), serverLocationWizardPage.getZkServerLocation());
+ } catch (final CoreException e) {
+						Display.getDefault().syncExec(new Runnable() {
+							public void run() {
+								IStatus status = e.getStatus();
+								MessageDialog.openError(Display.getDefault().getActiveShell(),
+										"ZooKeeper Error", status.getMessage() + " " + status.getException().getMessage());
+							}
+						});
+ }
return Status.OK_STATUS;
};
};
@@ -78,4 +101,13 @@
return false;
}
+ /* (non-Javadoc)
+ * @see org.eclipse.core.runtime.IExecutableExtension#setInitializationData
+ * (org.eclipse.core.runtime.IConfigurationElement, java.lang.String, java.lang.Object)
+ */
+ @Override
+ public void setInitializationData(IConfigurationElement config, String propertyName, Object data) throws CoreException {
+ this.configElement=config;
+ }
+
}
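Server creation stays inside an org.eclipse.core.runtime.jobs.Job so the wizard can close immediately while the work runs in the background; a stripped-down sketch of that pattern (names illustrative):

    import org.eclipse.core.runtime.IProgressMonitor;
    import org.eclipse.core.runtime.IStatus;
    import org.eclipse.core.runtime.Status;
    import org.eclipse.core.runtime.jobs.Job;

    // Illustrative sketch of scheduling background work from a wizard.
    public class BackgroundWorkExample {
        public static void schedule(final Runnable work) {
            Job j = new Job("Creating ZooKeeper project") {
                protected IStatus run(IProgressMonitor monitor) {
                    work.run(); // long-running work, off the UI thread
                    return Status.OK_STATUS;
                }
            };
            j.schedule();
        }
    }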
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
index 17d228c..e5905bc 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ReconnectAction.java
@@ -24,9 +24,13 @@
import org.apache.hdt.core.internal.model.ZooKeeperServer;
import org.apache.hdt.core.internal.zookeeper.ZooKeeperManager;
import org.apache.log4j.Logger;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.IStatus;
import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.viewers.ISelection;
import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.IObjectActionDelegate;
import org.eclipse.ui.IWorkbenchPart;
import org.eclipse.ui.navigator.resources.ProjectExplorer;
@@ -37,6 +41,10 @@
private ISelection selection;
private IWorkbenchPart targetPart;
+ private void showError(String message) {
+ MessageDialog.openError(Display.getDefault().getActiveShell(),
+ "ZooKeeper Re-connect Error", message);
+ }
/*
* (non-Javadoc)
*
@@ -44,26 +52,34 @@
*/
@Override
public void run(IAction action) {
- if (this.selection != null && !this.selection.isEmpty()) {
- IStructuredSelection sSelection = (IStructuredSelection) this.selection;
- @SuppressWarnings("rawtypes")
- Iterator itr = sSelection.iterator();
- while (itr.hasNext()) {
- Object object = itr.next();
- if (object instanceof ZooKeeperServer) {
- ZooKeeperServer r = (ZooKeeperServer) object;
- if(logger.isDebugEnabled())
- logger.debug("Reconnecting: "+r);
- ZooKeeperManager.INSTANCE.reconnect(r);
- if(logger.isDebugEnabled())
- logger.debug("Reconnected: "+r);
- if (targetPart instanceof ProjectExplorer) {
- ProjectExplorer pe = (ProjectExplorer) targetPart;
- pe.getCommonViewer().refresh(r, true);
+ Display.getDefault().syncExec(new Runnable() {
+ @Override
+ public void run() {
+ if (selection != null && !selection.isEmpty()) {
+ IStructuredSelection sSelection = (IStructuredSelection) selection;
+ @SuppressWarnings("rawtypes")
+ Iterator itr = sSelection.iterator();
+ while (itr.hasNext()) {
+ Object object = itr.next();
+ if (object instanceof ZooKeeperServer) {
+ ZooKeeperServer r = (ZooKeeperServer) object;
+ if(logger.isDebugEnabled())
+ logger.debug("Reconnecting: "+r);
+ try {
+ ZooKeeperManager.INSTANCE.reconnect(r);
+ } catch (CoreException e) {
+ logger.error("Error occurred ", e);
+								IStatus status = e.getStatus();
+								// Guard against a status without a nested exception.
+								Throwable cause = status.getException();
+								showError(cause == null ? status.getMessage() : cause.getMessage());
+ }
+ if(logger.isDebugEnabled())
+ logger.debug("Reconnected: "+r);
+ if (targetPart instanceof ProjectExplorer) {
+ ProjectExplorer pe = (ProjectExplorer) targetPart;
+ pe.getCommonViewer().refresh(r, true);
+ }
}
- }
- }
- }
+					}
+				}
+			}
+		});
}
/*
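
The rewrite above moves the whole selection walk onto the SWT UI thread,
since viewer refreshes and dialogs must not run from an arbitrary thread. A
minimal sketch of the Display.syncExec() contract this relies on (the
helper name is illustrative):

    import org.eclipse.swt.widgets.Display;

    public final class UiThread {

        private UiThread() {
        }

        // Runs the task on the SWT UI thread, blocking until it completes.
        public static void runOnUiThread(Runnable task) {
            Display display = Display.getDefault();
            if (display.getThread() == Thread.currentThread()) {
                task.run(); // already on the UI thread
            } else {
                display.syncExec(task);
            }
        }
    }
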
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
index 1579846..0c816e3 100644
--- a/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/internal/zookeeper/ZooKeeperCommonContentProvider.java
@@ -32,6 +32,7 @@
import org.eclipse.core.runtime.CoreException;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.ecore.util.EContentAdapter;
+import org.eclipse.jface.dialogs.MessageDialog;
import org.eclipse.jface.viewers.Viewer;
import org.eclipse.swt.widgets.Display;
import org.eclipse.ui.IMemento;
@@ -91,12 +92,11 @@
ZooKeeperClient client = ZooKeeperManager.INSTANCE.getClient(zkn.getServer());
List<ZNode> zkChildren = client.getChildren(zkn);
return zkChildren.toArray();
- } catch (CoreException e) {
+ } catch (Exception e) {
logger.error("Error getting children of node", e);
- } catch (IOException e) {
- logger.error("Error getting children of node", e);
- } catch (InterruptedException e) {
- logger.error("Error getting children of node", e);
+				MessageDialog.openError(Display.getDefault().getActiveShell(),
+						"ZooKeeper Error", e.getMessage());
}
}
return null;
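
The three separate handlers collapse into one broad catch (Exception e)
above; since the bundles target JavaSE-1.6, the narrower Java 7 multi-catch
form is presumably unavailable. For comparison, a self-contained sketch of
that form, where mayFail() is a stand-in for the ZooKeeperClient call:

    import java.io.IOException;

    public class MultiCatchSketch {

        static void mayFail() throws IOException, InterruptedException {
            // Stand-in for ZooKeeperClient.getChildren(...).
        }

        public static void main(String[] args) {
            try {
                mayFail();
            } catch (IOException | InterruptedException e) {
                // One handler, yet unrelated exceptions are still rejected
                // at compile time.
                System.err.println("Error getting children of node: " + e);
            }
        }
    }
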
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
new file mode 100644
index 0000000..240fc64
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/MapReducePreferencePage.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hdt.ui.preferences;
+
+import org.apache.hdt.core.AbstractHadoopHomeReader;
+import org.apache.hdt.core.HadoopVersion;
+import org.apache.hdt.ui.Activator;
+import org.eclipse.core.runtime.CoreException;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jface.preference.ComboFieldEditor;
+import org.eclipse.jface.preference.DirectoryFieldEditor;
+import org.eclipse.jface.preference.FieldEditor;
+import org.eclipse.jface.preference.FieldEditorPreferencePage;
+import org.eclipse.jface.preference.StringFieldEditor;
+import org.eclipse.jface.util.PropertyChangeEvent;
+import org.eclipse.ui.IWorkbench;
+import org.eclipse.ui.IWorkbenchPreferencePage;
+
+/**
+ * This class represents a preference page that is contributed to the
+ * Preferences dialog. By sub-classing <tt>FieldEditorPreferencePage</tt>, we
+ * can use the field support built into JFace that allows us to create a page
+ * that is small and knows how to save, restore and apply itself.
+ *
+ * <p>
+ * This page is used to modify preferences only. They are stored in the
+ * preference store that belongs to the main plug-in class. That way,
+ * preferences can be accessed directly via the preference store.
+ */
+
+public class MapReducePreferencePage extends FieldEditorPreferencePage implements IWorkbenchPreferencePage {
+
+ private StringFieldEditor hadoopHomeDirEditor;
+ private ComboFieldEditor hadoopVersionEditor;
+ private String hadoopVersionValue;
+ private String hadoopHomeValue;
+
+ public MapReducePreferencePage() {
+ super(GRID);
+ setPreferenceStore(Activator.getDefault().getPreferenceStore());
+ setTitle("Hadoop Map/Reduce Tools");
+ // setDescription("Hadoop Map/Reduce Preferences");
+ }
+
+ /**
+ * Creates the field editors. Field editors are abstractions of the common
+ * GUI blocks needed to manipulate various types of preferences. Each field
+ * editor knows how to save and restore itself.
+ */
+ @Override
+ public void createFieldEditors() {
+ DirectoryFieldEditor editor = new DirectoryFieldEditor(PreferenceConstants.P_PATH, "&Hadoop installation directory:", getFieldEditorParent());
+ addField(editor);
+ HadoopVersion[] versions = HadoopVersion.values();
+ String[][] values = new String[versions.length][2];
+ int pos = 0;
+ for (HadoopVersion ver : versions) {
+ values[pos][0] = values[pos][1] = ver.getDisplayName();
+ pos++;
+ }
+ ComboFieldEditor options = new ComboFieldEditor(PreferenceConstants.P_VERSION, "&Hadoop Version:", values, getFieldEditorParent());
+ addField(options);
+ hadoopVersionEditor = options;
+ hadoopHomeDirEditor = editor;
+ hadoopVersionValue = HadoopVersion.Version1.getDisplayName();
+ }
+
+ public void propertyChange(PropertyChangeEvent event) {
+ super.propertyChange(event);
+ if (event.getSource().equals(hadoopVersionEditor)) {
+ hadoopVersionValue = event.getNewValue().toString();
+ }
+ if (event.getSource().equals(hadoopHomeDirEditor)) {
+ hadoopHomeValue = event.getNewValue().toString();
+ }
+ if (event.getProperty().equals(FieldEditor.VALUE)) {
+ checkState();
+ }
+ }
+
+ @Override
+ protected void checkState() {
+ super.checkState();
+		if (hadoopHomeValue == null || hadoopVersionValue == null) {
+ setErrorMessage("Please set Hadoop Home/Version.");
+ setValid(false);
+ return;
+ }
+ AbstractHadoopHomeReader homeReader;
+ try {
+ homeReader = AbstractHadoopHomeReader.createReader(hadoopVersionValue);
+ if (!homeReader.validateHadoopHome(new Path(hadoopHomeValue).toFile())) {
+ setErrorMessage("Invalid Hadoop Home.");
+ setValid(false);
+ } else {
+ setErrorMessage(null);
+ setValid(true);
+ }
+ } catch (CoreException e) {
+			setErrorMessage(e.getMessage());
+			setValid(false);
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.eclipse.ui.IWorkbenchPreferencePage#init(org.eclipse.ui.IWorkbench)
+ */
+ @Override
+ public void init(IWorkbench workbench) {
+		// Nothing to initialize; the preference store is set in the constructor.
+ }
+}
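
Values saved by this page land in the plug-in's preference store under the
keys defined in PreferenceConstants (added below). A sketch of how other
plug-in code would typically read them back, assuming the same Activator:

    import org.apache.hdt.ui.Activator;
    import org.apache.hdt.ui.preferences.PreferenceConstants;
    import org.eclipse.jface.preference.IPreferenceStore;

    public class HadoopPreferenceReader {

        // Returns the configured Hadoop installation directory.
        public static String hadoopHome() {
            IPreferenceStore store = Activator.getDefault().getPreferenceStore();
            return store.getString(PreferenceConstants.P_PATH);
        }

        // Returns the display name of the configured Hadoop version.
        public static String hadoopVersion() {
            IPreferenceStore store = Activator.getDefault().getPreferenceStore();
            return store.getString(PreferenceConstants.P_VERSION);
        }
    }
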
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
new file mode 100644
index 0000000..b0bfa48
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/preferences/PreferenceConstants.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.preferences;
+
+/**
+ * Constant definitions for plug-in preferences
+ */
+public class PreferenceConstants {
+
+ public static final String P_PATH = "pathPreference";
+
+ public static final String P_VERSION = "versionPreference";
+}
diff --git a/org.apache.hdt.updateSite/.classpath b/org.apache.hdt.updateSite/.classpath
index 4c2b7c4..36851f4 100644
--- a/org.apache.hdt.updateSite/.classpath
+++ b/org.apache.hdt.updateSite/.classpath
@@ -1,9 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
- <classpathentry kind="src" path="target/maven-shared-archive-resources" excluding="**/*.java"/>
- <classpathentry kind="output" path="target/classes"/>
- <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
- <classpathentry kind="src" path="/org.apache.hdt.core"/>
- <classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
- <classpathentry kind="src" path="/org.apache.hdt.ui"/>
-</classpath>
\ No newline at end of file
+ <classpathentry excluding="**/*.java" kind="src" path="target/maven-shared-archive-resources"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+ <classpathentry kind="src" path="/org.apache.hdt.core"/>
+ <classpathentry kind="src" path="/org.apache.hdt.hadoop.release"/>
+ <classpathentry kind="src" path="/org.apache.hdt.ui"/>
+ <classpathentry combineaccessrules="false" kind="src" path="/org.apache.hdt.hadoop2.release"/>
+ <classpathentry kind="output" path="target/classes"/>
+</classpath>
diff --git a/org.apache.hdt.updateSite/.project b/org.apache.hdt.updateSite/.project
index 99c4771..b94eb36 100644
--- a/org.apache.hdt.updateSite/.project
+++ b/org.apache.hdt.updateSite/.project
@@ -6,6 +6,7 @@
<project>org.apache.hdt.core</project>
<project>org.apache.hdt.feature</project>
<project>org.apache.hdt.hadoop.release</project>
+ <project>org.apache.hdt.hadoop2.release</project>
<project>org.apache.hdt.ui</project>
</projects>
<buildSpec>
diff --git a/org.apache.hdt.updateSite/pom.xml b/org.apache.hdt.updateSite/pom.xml
index bffad22..f3a33e8 100644
--- a/org.apache.hdt.updateSite/pom.xml
+++ b/org.apache.hdt.updateSite/pom.xml
@@ -23,7 +23,7 @@
<relativePath>../pom.xml</relativePath>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
</parent>
<artifactId>org.apache.hdt.updateSite</artifactId>
diff --git a/pom.xml b/pom.xml
index b2b1738..3c1ce87 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@
</parent>
<groupId>org.apache.hdt</groupId>
<artifactId>hdt.master</artifactId>
- <version>0.0.1.incubating</version>
+ <version>0.0.2.incubating</version>
<packaging>pom</packaging>
<name>Apache Hadoop Development Tools</name>
<description>Eclipse tools for developing against the Hadoop platform</description>
@@ -127,6 +127,7 @@
<module>org.apache.hdt.core</module>
<module>org.apache.hdt.ui</module>
<module>org.apache.hdt.hadoop.release</module>
+ <module>org.apache.hdt.hadoop2.release</module>
<module>org.apache.hdt.feature</module>
<module>org.apache.hdt.updateSite</module>
<module>org.apache.hdt.ui.test</module>
@@ -148,6 +149,13 @@
<version>${tycho-version}</version>
<configuration>
<pomDependencies>consider</pomDependencies>
+ <environments>
+ <environment>
+ <os>linux</os>
+ <ws>gtk</ws>
+ <arch>x86_64</arch>
+ </environment>
+ </environments>
</configuration>
</plugin>
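
Pinning a single entry here makes Tycho resolve platform-specific
dependencies such as SWT fragments for linux/gtk/x86_64 only. A build meant
to serve other desktops would typically enumerate each target environment;
a sketch, in which the win32 and macosx entries are illustrative additions
rather than part of this patch:

    <environments>
        <environment>
            <os>linux</os>
            <ws>gtk</ws>
            <arch>x86_64</arch>
        </environment>
        <environment>
            <os>win32</os>
            <ws>win32</ws>
            <arch>x86_64</arch>
        </environment>
        <environment>
            <os>macosx</os>
            <ws>cocoa</ws>
            <arch>x86_64</arch>
        </environment>
    </environments>
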
@@ -206,7 +214,7 @@
</archive>
</configuration>
</plugin>
- <plugin>
+ <plugin>
<groupId>org.eclipse.tycho</groupId>
<artifactId>tycho-versions-plugin</artifactId>
<version>${tycho-version}</version>