added all the necessary classes for the server (now cluster) view in the new
layout
diff --git a/org.apache.hdt.ui/.classpath b/org.apache.hdt.ui/.classpath
index ad32c83..355df07 100644
--- a/org.apache.hdt.ui/.classpath
+++ b/org.apache.hdt.ui/.classpath
@@ -3,5 +3,6 @@
 	<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.6"/>
 	<classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
 	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="lib" path="/Users/amberry/tools/hadoop-1.0.4/hadoop-core-1.0.4.jar"/>
 	<classpathentry kind="output" path="bin"/>
 </classpath>
diff --git a/org.apache.hdt.ui/plugin.xml b/org.apache.hdt.ui/plugin.xml
index 56bcf3a..1beda8c 100644
--- a/org.apache.hdt.ui/plugin.xml
+++ b/org.apache.hdt.ui/plugin.xml
@@ -17,5 +17,21 @@
             name="Hadoop">
       </perspective>
    </extension>
+   <extension
+         point="org.eclipse.ui.views">
+      <category
+            id="org.apache.hdt.ui.views"
+            name="Hadoop">
+      </category>
+      <view
+            allowMultiple="false"
+            category="org.apache.hdt.ui.views"
+            class="org.apache.hdt.ui.views.ClusterView"
+            icon="resources/hadoop-logo-16x16.png"
+            id="org.apache.hdt.ui.ClusterView"
+            name="Hadoop Clusters"
+            restorable="true">
+      </view>
+   </extension>
 
 </plugin>
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
new file mode 100644
index 0000000..6287449
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/EditLocationAction.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.actions;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.ui.wizards.HadoopLocationWizard;
+import org.apache.hdt.ui.views.ClusterView;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+/**
+ * Editing server properties action
+ */
+public class EditLocationAction extends Action {
+
+  private ClusterView serverView;
+
+  public EditLocationAction(ClusterView serverView) {
+    this.serverView = serverView;
+
+    setText("Edit Hadoop location...");
+    setImageDescriptor(ImageLibrary.get("server.view.action.location.edit"));
+  }
+
+  @Override
+  public void run() {
+
+    final HadoopCluster server = serverView.getSelectedServer();
+    if (server == null)
+      return;
+
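+    // Wrap the single HadoopLocationWizard page in a throwaway anonymous
+    // Wizard so it can be shown in a standard WizardDialog.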
+    WizardDialog dialog = new WizardDialog(null, new Wizard() {
+      private HadoopLocationWizard page = new HadoopLocationWizard(server);
+
+      @Override
+      public void addPages() {
+        super.addPages();
+        setWindowTitle("Edit Hadoop location...");
+        addPage(page);
+      }
+
+      @Override
+      public boolean performFinish() {
+        page.performFinish();
+        return true;
+      }
+    });
+
+    dialog.create();
+    dialog.setBlockOnOpen(true);
+    dialog.open();
+
+    super.run();
+  }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java
new file mode 100644
index 0000000..a7c5c81
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/actions/NewLocationAction.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.actions;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.wizards.HadoopLocationWizard;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.wizard.Wizard;
+import org.eclipse.jface.wizard.WizardDialog;
+
+
+/**
+ * Action corresponding to creating a new MapReduce Server.
+ */
+
+public class NewLocationAction extends Action {
+  public NewLocationAction() {
+    setText("New Hadoop location...");
+    setImageDescriptor(ImageLibrary.get("server.view.action.location.new"));
+  }
+
+  @Override
+  public void run() {
+    WizardDialog dialog = new WizardDialog(null, new Wizard() {
+      private HadoopLocationWizard page = new HadoopLocationWizard();
+
+      @Override
+      public void addPages() {
+        super.addPages();
+        setWindowTitle("New Hadoop location...");
+        addPage(page);
+      }
+
+      @Override
+      public boolean performFinish() {
+        page.performFinish();
+        return true;
+      }
+
+    });
+
+    dialog.create();
+    dialog.setBlockOnOpen(true);
+    dialog.open();
+
+    super.run();
+  }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
new file mode 100644
index 0000000..1468b01
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ConfProp.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+
+public enum ConfProp {
+  /**
+   * Property name for the Hadoop location name
+   */
+  PI_LOCATION_NAME(true, "location.name", "New Hadoop location"),
+
+  /**
+   * Property name for the master host name (the Job tracker)
+   */
+  PI_JOB_TRACKER_HOST(true, "jobtracker.host", "localhost"),
+
+  /**
+   * Property name for the DFS master host name (the Name node)
+   */
+  PI_NAME_NODE_HOST(true, "namenode.host", "localhost"),
+
+  /**
+   * Property name for the installation directory on the master node
+   */
+  // PI_INSTALL_DIR(true, "install.dir", "/dir/hadoop-version/"),
+  /**
+   * User name to use for Hadoop operations
+   */
+  PI_USER_NAME(true, "user.name", System.getProperty("user.name",
+      "who are you?")),
+
+  /**
+   * Property name for SOCKS proxy activation
+   */
+  PI_SOCKS_PROXY_ENABLE(true, "socks.proxy.enable", "no"),
+
+  /**
+   * Property name for the SOCKS proxy host
+   */
+  PI_SOCKS_PROXY_HOST(true, "socks.proxy.host", "host"),
+
+  /**
+   * Property name for the SOCKS proxy port
+   */
+  PI_SOCKS_PROXY_PORT(true, "socks.proxy.port", "1080"),
+
+  /**
+   * TCP port number for the name node
+   */
+  PI_NAME_NODE_PORT(true, "namenode.port", "50040"),
+
+  /**
+   * TCP port number for the job tracker
+   */
+  PI_JOB_TRACKER_PORT(true, "jobtracker.port", "50020"),
+
+  /**
+   * Are the Map/Reduce and the Distributed FS masters hosted on the same
+   * machine?
+   */
+  PI_COLOCATE_MASTERS(true, "masters.colocate", "yes"),
+
+  /**
+   * Property name for naming the job tracker (URI). This property is related
+   * to {@link #PI_JOB_TRACKER_HOST}
+   */
+  JOB_TRACKER_URI(false, "mapreduce.jobtracker.address", "localhost:50020"),
+
+  /**
+   * Property name for naming the default file system (URI).
+   */
+  FS_DEFAULT_URI(false, "fs.default.name", "hdfs://localhost:50040/"),
+
+  /**
+   * Property name for the default socket factory:
+   */
+  SOCKET_FACTORY_DEFAULT(false, "hadoop.rpc.socket.factory.class.default",
+      "org.apache.hadoop.net.StandardSocketFactory"),
+
+  /**
+   * Property name for the SOCKS server URI.
+   */
+  SOCKS_SERVER(false, "hadoop.socks.server", "host:1080"),
+
+  ;
+
+  /**
+   * Map <property name> -> ConfProp
+   */
+  private static Map<String, ConfProp> map;
+
+  private static synchronized void registerProperty(String name,
+      ConfProp prop) {
+
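+    // Enum constructors run before the enum's static initializers, so the
+    // map has to be created lazily here instead of at declaration site.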
+    if (ConfProp.map == null)
+      ConfProp.map = new HashMap<String, ConfProp>();
+
+    ConfProp.map.put(name, prop);
+  }
+
+  public static ConfProp getByName(String propName) {
+    return map.get(propName);
+  }
+
+  public final String name;
+
+  public final String defVal;
+
+  ConfProp(boolean internal, String name, String defVal) {
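+    // Plug-in-only properties are namespaced so they can live alongside
+    // genuine Hadoop keys in the same Configuration, e.g. "location.name"
+    // becomes "eclipse.plug-in.location.name".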
+    if (internal)
+      name = "eclipse.plug-in." + name;
+    this.name = name;
+    this.defVal = defVal;
+
+    ConfProp.registerProperty(name, this);
+  }
+
+  String get(Configuration conf) {
+    return conf.get(name);
+  }
+
+  void set(Configuration conf, String value) {
+    assert value != null;
+    conf.set(name, value);
+  }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
new file mode 100644
index 0000000..588dd4e
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopCluster.java
@@ -0,0 +1,518 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.logging.Logger;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.cluster.utils.JarModule;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.IStatus;
+import org.eclipse.core.runtime.Status;
+import org.eclipse.core.runtime.jobs.Job;
+import org.eclipse.swt.widgets.Display;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.w3c.dom.Text;
+import org.xml.sax.SAXException;
+
+/**
+ * Representation of a Hadoop location, that is, of the master node (NameNode,
+ * JobTracker).
+ * 
+ * <p>
+ * This class does not create any SSH connection anymore. Tunneling must be
+ * set up outside of Eclipse for now (using Putty or <tt>ssh -D&lt;port&gt;
+ * &lt;host&gt;</tt>)
+ * 
+ * <p>
+ * <em> TODO </em>
+ * <li> Disable the updater if a location becomes unreachable or fails for
+ * too long
+ * <li> Stop the updater on location's disposal/removal
+ */
+
+public class HadoopCluster {
+
+  /**
+   * Frequency of location status observations expressed as the delay in ms
+   * between each observation
+   * 
+   * TODO Add a preference parameter for this
+   */
+  protected static final long STATUS_OBSERVATION_DELAY = 1500;
+
+  /**
+   * Background job that periodically polls this location for the status of
+   * its running jobs
+   */
+  public class LocationStatusUpdater extends Job {
+
+    JobClient client = null;
+
+    /**
+     * Set up the updater
+     */
+    public LocationStatusUpdater() {
+      super("Map/Reduce location status updater");
+      this.setSystem(true);
+    }
+
+    /* @inheritDoc */
+    @Override
+    protected IStatus run(IProgressMonitor monitor) {
+      if (client == null) {
+        try {
+          client = HadoopCluster.this.getJobClient();
+
+        } catch (IOException ioe) {
+          client = null;
+          return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
+              "Cannot connect to the Map/Reduce location: "
+                            + HadoopCluster.this.getLocationName(),
+                            ioe);
+        }
+      }
+
+      try {
+        // Set of all known existing Job IDs we want fresh info of
+        Set<JobID> missingJobIds =
+            new HashSet<JobID>(runningJobs.keySet());
+
+        JobStatus[] jstatus = client.jobsToComplete();
+        for (JobStatus status : jstatus) {
+
+          JobID jobId = status.getJobID();
+          missingJobIds.remove(jobId);
+
+          HadoopJob hJob;
+          synchronized (HadoopCluster.this.runningJobs) {
+            hJob = runningJobs.get(jobId);
+            if (hJob == null) {
+              // Unknown job, create an entry
+              RunningJob running = client.getJob(jobId);
+              hJob =
+                  new HadoopJob(HadoopCluster.this, jobId, running, status);
+              newJob(hJob);
+            }
+          }
+
+          // Update HadoopJob with fresh info
+          updateJob(hJob, status);
+        }
+
+        // Ask explicitly for fresh info for these Job IDs
+        for (JobID jobId : missingJobIds) {
+          HadoopJob hJob = runningJobs.get(jobId);
+          if (!hJob.isCompleted())
+            updateJob(hJob, null);
+        }
+
+      } catch (IOException ioe) {
+        client = null;
+        return new Status(Status.ERROR, Activator.PLUGIN_ID, 0,
+            "Cannot retrieve running Jobs on location: "
+                          + HadoopCluster.this.getLocationName(), ioe);
+      }
+
+      // Schedule the next observation
+      schedule(STATUS_OBSERVATION_DELAY);
+
+      return Status.OK_STATUS;
+    }
+
+    /**
+     * Stores the new job and makes it available to listeners
+     * 
+     * @param data the new job
+     */
+    private void newJob(final HadoopJob data) {
+      runningJobs.put(data.getJobID(), data);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobAdded(data);
+        }
+      });
+    }
+
+    /**
+     * Updates the status of a job
+     * 
+     * @param job the job to update
+     * @param status the fresh status (may be null)
+     */
+    private void updateJob(final HadoopJob job, JobStatus status) {
+      job.update(status);
+
+      Display.getDefault().asyncExec(new Runnable() {
+        public void run() {
+          fireJobChanged(job);
+        }
+      });
+    }
+
+  }
+
+  static Logger log = Logger.getLogger(HadoopCluster.class.getName());
+
+  /**
+   * Hadoop configuration of the location. Also contains specific parameters
+   * for the plug-in. These parameters are prefixed with eclipse.plug-in.*
+   */
+  private Configuration conf;
+
+  /**
+   * Jobs listeners
+   */
+  private Set<IJobListener> jobListeners = new HashSet<IJobListener>();
+
+  /**
+   * Jobs running on this location. The keys of this map are the Job IDs.
+   */
+  private transient Map<JobID, HadoopJob> runningJobs =
+      Collections.synchronizedMap(new TreeMap<JobID, HadoopJob>());
+
+  /**
+   * Status updater for this location
+   */
+  private LocationStatusUpdater statusUpdater;
+
+  // state and status - transient
+  private transient String state = "";
+
+  /**
+   * Creates a new default Hadoop location
+   */
+  public HadoopCluster() {
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+  }
+
+  /**
+   * Creates a location from a file
+   * 
+   * @param file the XML file describing the location
+   * @throws IOException
+   * @throws SAXException
+   * @throws ParserConfigurationException
+   */
+  public HadoopCluster(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
+    this.conf = new Configuration();
+    this.addPluginConfigDefaultProperties();
+    this.loadFromXML(file);
+  }
+
+  /**
+   * Create a new Hadoop location by copying an already existing one.
+   * 
+   * @param existing the location to copy
+   */
+  public HadoopCluster(HadoopCluster existing) {
+    this();
+    this.load(existing);
+  }
+
+  public void addJobListener(IJobListener l) {
+    jobListeners.add(l);
+  }
+
+  public void dispose() {
+    // TODO close DFS connections?
+  }
+
+  /**
+   * List all elements that should be present in the Server window (all
+   * servers and all jobs running on each server)
+   * 
+   * @return collection of jobs for this location
+   */
+  public Collection<HadoopJob> getJobs() {
+    startStatusUpdater();
+    return this.runningJobs.values();
+  }
+
+  /**
+   * Remove the given job from the currently running jobs map
+   * 
+   * @param job the job to remove
+   */
+  public void purgeJob(final HadoopJob job) {
+    runningJobs.remove(job.getJobID());
+    Display.getDefault().asyncExec(new Runnable() {
+      public void run() {
+        fireJobRemoved(job);
+      }
+    });
+  }
+
+  /**
+   * Returns the {@link Configuration} defining this location.
+   * 
+   * @return the location configuration
+   */
+  public Configuration getConfiguration() {
+    return this.conf;
+  }
+
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param prop the configuration property
+   * @return the property value
+   */
+  public String getConfProp(ConfProp prop) {
+    return prop.get(conf);
+  }
+
+  /**
+   * Gets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @return the property value
+   */
+  public String getConfProp(String propName) {
+    return this.conf.get(propName);
+  }
+
+  public String getLocationName() {
+    return ConfProp.PI_LOCATION_NAME.get(conf);
+  }
+
+  /**
+   * Returns the master host name of the Hadoop location (the Job tracker)
+   * 
+   * @return the host name of the Job tracker
+   */
+  public String getMasterHostName() {
+    return getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+  }
+
+  public String getState() {
+    return state;
+  }
+
+  /**
+   * Overwrite this location with the given existing location
+   * 
+   * @param existing the existing location
+   */
+  public void load(HadoopCluster existing) {
+    this.conf = new Configuration(existing.conf);
+  }
+
+  /**
+   * Overwrite this location with settings available in the given XML file.
+   * The existing configuration is preserved if the XML file is invalid.
+   * 
+   * @param file the XML file to load
+   * @return true if the file was valid and its settings were loaded
+   * @throws ParserConfigurationException
+   * @throws IOException
+   * @throws SAXException
+   */
+  public boolean loadFromXML(File file) throws ParserConfigurationException,
+      SAXException, IOException {
+
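+    // The file uses the standard Hadoop <configuration>/<property> layout,
+    // the same format that storeSettingsToFile() writes back out.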
+    Configuration newConf = new Configuration(this.conf);
+
+    DocumentBuilder builder =
+        DocumentBuilderFactory.newInstance().newDocumentBuilder();
+    Document document = builder.parse(file);
+
+    Element root = document.getDocumentElement();
+    if (!"configuration".equals(root.getTagName()))
+      return false;
+    NodeList props = root.getChildNodes();
+    for (int i = 0; i < props.getLength(); i++) {
+      Node propNode = props.item(i);
+      if (!(propNode instanceof Element))
+        continue;
+      Element prop = (Element) propNode;
+      if (!"property".equals(prop.getTagName()))
+        return false;
+      NodeList fields = prop.getChildNodes();
+      String attr = null;
+      String value = null;
+      for (int j = 0; j < fields.getLength(); j++) {
+        Node fieldNode = fields.item(j);
+        if (!(fieldNode instanceof Element))
+          continue;
+        Element field = (Element) fieldNode;
+        if ("name".equals(field.getTagName()))
+          attr = ((Text) field.getFirstChild()).getData();
+        if ("value".equals(field.getTagName()) && field.hasChildNodes())
+          value = ((Text) field.getFirstChild()).getData();
+      }
+      if (attr != null && value != null)
+        newConf.set(attr, value);
+    }
+
+    this.conf = newConf;
+    return true;
+  }
+
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param prop the property
+   * @param propValue the property value
+   */
+  public void setConfProp(ConfProp prop, String propValue) {
+    prop.set(conf, propValue);
+  }
+
+  /**
+   * Sets a Hadoop configuration property value
+   * 
+   * @param propName the property name
+   * @param propValue the property value
+   */
+  public void setConfProp(String propName, String propValue) {
+    this.conf.set(propName, propValue);
+  }
+
+  public void setLocationName(String newName) {
+    ConfProp.PI_LOCATION_NAME.set(conf, newName);
+  }
+
+  /**
+   * Write this location's settings to the given file
+   * 
+   * @param file the destination file
+   * @throws IOException
+   */
+  public void storeSettingsToFile(File file) throws IOException {
+    FileOutputStream fos = new FileOutputStream(file);
+    try {
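+      // Close explicitly inside the try so write errors surface here; the
+      // finally block then only cleans up when writeXml() or close() threw.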
+      this.conf.writeXml(fos);
+      fos.close();
+      fos = null;
+    } finally {
+      IOUtils.closeStream(fos);
+    }
+
+  }
+
+  /* @inheritDoc */
+  @Override
+  public String toString() {
+    return this.getLocationName();
+  }
+
+  /**
+   * Fill the configuration with valid default values
+   */
+  private void addPluginConfigDefaultProperties() {
+    for (ConfProp prop : ConfProp.values()) {
+      if (conf.get(prop.name) == null)
+        conf.set(prop.name, prop.defVal);
+    }
+  }
+
+  /**
+   * Starts the location status updater
+   */
+  private synchronized void startStatusUpdater() {
+    if (statusUpdater == null) {
+      statusUpdater = new LocationStatusUpdater();
+      statusUpdater.schedule();
+    }
+  }
+
+  /*
+   * Rewrite of the connecting and tunneling to the Hadoop location
+   */
+
+  /**
+   * Provides access to the default file system of this location.
+   * 
+   * @return a {@link FileSystem}
+   */
+  public FileSystem getDFS() throws IOException {
+    return FileSystem.get(this.conf);
+  }
+
+  /**
+   * Provides access to the Job tracking system of this location
+   * 
+   * @return a {@link JobClient}
+   */
+  public JobClient getJobClient() throws IOException {
+    JobConf jconf = new JobConf(this.conf);
+    return new JobClient(jconf);
+  }
+
+  /*
+   * Listeners handling
+   */
+
+  protected void fireJarPublishDone(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishDone(jar);
+    }
+  }
+
+  protected void fireJarPublishStart(JarModule jar) {
+    for (IJobListener listener : jobListeners) {
+      listener.publishStart(jar);
+    }
+  }
+
+  protected void fireJobAdded(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobAdded(job);
+    }
+  }
+
+  protected void fireJobRemoved(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobRemoved(job);
+    }
+  }
+
+  protected void fireJobChanged(HadoopJob job) {
+    for (IJobListener listener : jobListeners) {
+      listener.jobChanged(job);
+    }
+  }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
new file mode 100644
index 0000000..78f3fc6
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/HadoopJob.java
@@ -0,0 +1,349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobStatus;
+import org.apache.hadoop.mapred.RunningJob;
+
+/**
+ * Representation of a Map/Reduce running job on a given location
+ */
+
+public class HadoopJob {
+
+  /**
+   * Enum representation of a Job state
+   */
+  public enum JobState {
+    PREPARE(JobStatus.PREP), RUNNING(JobStatus.RUNNING), FAILED(
+        JobStatus.FAILED), SUCCEEDED(JobStatus.SUCCEEDED);
+
+    final int state;
+
+    JobState(int state) {
+      this.state = state;
+    }
+
+    static JobState ofInt(int state) {
+      if (state == JobStatus.PREP) {
+        return PREPARE;
+      }
+      else if (state == JobStatus.RUNNING) {
+        return RUNNING;
+      }
+      else if (state == JobStatus.FAILED) {
+        return FAILED;
+      }
+      else if (state == JobStatus.SUCCEEDED) {
+        return SUCCEEDED;
+      }
+      else {
+        return null;
+      }
+    }
+  }
+
+  /**
+   * Location this Job runs on
+   */
+  private final HadoopCluster location;
+
+  /**
+   * Unique identifier of this Job
+   */
+  final JobID jobId;
+
+  /**
+   * Status representation of a running job. This actually contains a
+   * reference to a JobClient. Its methods might block.
+   */
+  RunningJob running;
+
+  /**
+   * Last polled status
+   * 
+   * @deprecated should apparently not be used
+   */
+  JobStatus status;
+
+  /**
+   * Last polled counters
+   */
+  Counters counters;
+
+  /**
+   * Job Configuration
+   */
+  JobConf jobConf = null;
+
+  boolean completed = false;
+
+  boolean successful = false;
+
+  boolean killed = false;
+
+  int totalMaps;
+
+  int totalReduces;
+
+  int completedMaps;
+
+  int completedReduces;
+
+  float mapProgress;
+
+  float reduceProgress;
+
+  /**
+   * Constructor for a Hadoop job representation
+   * 
+   * @param location the location the job runs on
+   * @param id the job's unique identifier
+   * @param running the running job handle from the JobClient
+   * @param status the last polled job status
+   */
+  public HadoopJob(HadoopCluster location, JobID id, RunningJob running,
+      JobStatus status) {
+
+    this.location = location;
+    this.jobId = id;
+    this.running = running;
+
+    loadJobFile();
+
+    update(status);
+  }
+
+  /**
+   * Try to locate and load the JobConf file for this job so as to get more
+   * details on it (number of maps and reduces)
+   */
+  private void loadJobFile() {
+    try {
+      String jobFile = getJobFile();
+      FileSystem fs = location.getDFS();
+      File tmp = File.createTempFile(getJobID().toString(), ".xml");
+      if (FileUtil.copy(fs, new Path(jobFile), tmp, false, location
+          .getConfiguration())) {
+        this.jobConf = new JobConf(tmp.toString());
+
+        this.totalMaps = jobConf.getNumMapTasks();
+        this.totalReduces = jobConf.getNumReduceTasks();
+      }
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+  }
+
+  /* @inheritDoc */
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((jobId == null) ? 0 : jobId.hashCode());
+    result = prime * result + ((location == null) ? 0 : location.hashCode());
+    return result;
+  }
+
+  /* @inheritDoc */
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (!(obj instanceof HadoopJob))
+      return false;
+    final HadoopJob other = (HadoopJob) obj;
+    if (jobId == null) {
+      if (other.jobId != null)
+        return false;
+    } else if (!jobId.equals(other.jobId))
+      return false;
+    if (location == null) {
+      if (other.location != null)
+        return false;
+    } else if (!location.equals(other.location))
+      return false;
+    return true;
+  }
+
+  /**
+   * Get the running state of the Job (see {@link JobStatus}).
+   * 
+   * @return the current state of this job
+   */
+  public JobState getState() {
+    if (this.completed) {
+      if (this.successful) {
+        return JobState.SUCCEEDED;
+      } else {
+        return JobState.FAILED;
+      }
+    } else {
+      return JobState.RUNNING;
+    }
+    // return JobState.ofInt(this.status.getRunState());
+  }
+
+  /**
+   * @return the unique identifier of this job
+   */
+  public JobID getJobID() {
+    return this.jobId;
+  }
+
+  /**
+   * @return the location this job runs on
+   */
+  public HadoopCluster getLocation() {
+    return this.location;
+  }
+
+  /**
+   * @return true if this job has completed
+   */
+  public boolean isCompleted() {
+    return this.completed;
+  }
+
+  /**
+   * @return the name of this job
+   */
+  public String getJobName() {
+    return this.running.getJobName();
+  }
+
+  /**
+   * @return the path of this job's configuration file
+   */
+  public String getJobFile() {
+    return this.running.getJobFile();
+  }
+
+  /**
+   * Return the tracking URL for this Job.
+   * 
+   * @return string representation of the tracking URL for this Job
+   */
+  public String getTrackingURL() {
+    return this.running.getTrackingURL();
+  }
+
+  /**
+   * Returns a string representation of this job status
+   * 
+   * @return string representation of this job status
+   */
+  public String getStatus() {
+
+    StringBuffer s = new StringBuffer();
+
+    s.append("Maps : " + completedMaps + "/" + totalMaps);
+    s.append(" (" + mapProgress + ")");
+    s.append("  Reduces : " + completedReduces + "/" + totalReduces);
+    s.append(" (" + reduceProgress + ")");
+
+    return s.toString();
+  }
+
+  /**
+   * Update this job status according to the given JobStatus
+   * 
+   * @param status
+   */
+  void update(JobStatus status) {
+    this.status = status;
+    try {
+      this.counters = running.getCounters();
+      this.completed = running.isComplete();
+      this.successful = running.isSuccessful();
+      this.mapProgress = running.mapProgress();
+      this.reduceProgress = running.reduceProgress();
+      // running.getTaskCompletionEvents(fromEvent);
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+    }
+
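+    // Completed task counts are estimated from the progress ratios; the
+    // RunningJob API polled above does not expose exact per-task counts.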
+    this.completedMaps = (int) (this.totalMaps * this.mapProgress);
+    this.completedReduces = (int) (this.totalReduces * this.reduceProgress);
+  }
+
+  /**
+   * Print this job's counters (for debugging purposes)
+   */
+  void printCounters() {
+    System.out.printf("New Job:\n", counters);
+    for (String groupName : counters.getGroupNames()) {
+      Counters.Group group = counters.getGroup(groupName);
+      System.out.printf("\t%s[%s]\n", groupName, group.getDisplayName());
+
+      for (Counters.Counter counter : group) {
+        System.out.printf("\t\t%s: %s\n", counter.getDisplayName(),
+                                          counter.getCounter());
+      }
+    }
+    System.out.printf("\n");
+  }
+
+  /**
+   * Kill this job
+   */
+  public void kill() {
+    try {
+      this.running.killJob();
+      this.killed = true;
+
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Print this job's status (for debugging purposes)
+   */
+  public void display() {
+    System.out.printf("Job id=%s, name=%s\n", getJobID(), getJobName());
+    System.out.printf("Configuration file: %s\n", getJobID());
+    System.out.printf("Tracking URL: %s\n", getTrackingURL());
+
+    System.out.printf("Completion: map: %f reduce %f\n",
+        100.0 * this.mapProgress, 100.0 * this.reduceProgress);
+
+    System.out.println("Job total maps = " + totalMaps);
+    System.out.println("Job completed maps = " + completedMaps);
+    System.out.println("Map percentage complete = " + mapProgress);
+    System.out.println("Job total reduces = " + totalReduces);
+    System.out.println("Job completed reduces = " + completedReduces);
+    System.out.println("Reduce percentage complete = " + reduceProgress);
+    System.out.flush();
+  }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
new file mode 100644
index 0000000..2f7a245
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IHadoopClusterListener.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import org.apache.hdt.ui.cluster.HadoopCluster;
+
+/**
+ * Interface for monitoring server changes
+ */
+public interface IHadoopClusterListener {
+  void serverChanged(HadoopCluster location, int type);
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
new file mode 100644
index 0000000..5d001c4
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/IJobListener.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import org.apache.hdt.ui.cluster.utils.JarModule;
+
+/**
+ * Interface for updating/adding jobs to the MapReduce Server view.
+ */
+public interface IJobListener {
+
+  void jobChanged(HadoopJob job);
+
+  void jobAdded(HadoopJob job);
+
+  void jobRemoved(HadoopJob job);
+
+  void publishStart(JarModule jar);
+
+  void publishDone(JarModule jar);
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
new file mode 100644
index 0000000..a1a990e
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/ServerRegistry.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.eclipse.jface.dialogs.MessageDialog;
+
+/**
+ * Register of Hadoop locations.
+ * 
+ * Each location corresponds to a Hadoop {@link Configuration} stored as an
+ * XML file in the workspace plug-in configuration directory:
+ * <p>
+ * <tt>
+ * &lt;workspace-dir&gt;/.metadata/.plugins/org.apache.hadoop.eclipse/locations/*.xml
+ * </tt>
+ * 
+ */
+public class ServerRegistry {
+
+  private static final ServerRegistry INSTANCE = new ServerRegistry();
+
+  public static final int SERVER_ADDED = 0;
+
+  public static final int SERVER_REMOVED = 1;
+
+  public static final int SERVER_STATE_CHANGED = 2;
+
+  private final File baseDir =
+      Activator.getDefault().getStateLocation().toFile();
+
+  private final File saveDir = new File(baseDir, "locations");
+
+  private ServerRegistry() {
+    if (saveDir.exists() && !saveDir.isDirectory())
+      saveDir.delete();
+    if (!saveDir.exists())
+      saveDir.mkdirs();
+
+    load();
+  }
+
+  private Map<String, HadoopCluster> servers;
+
+  private Set<IHadoopClusterListener> listeners =
+      new HashSet<IHadoopClusterListener>();
+
+  public static ServerRegistry getInstance() {
+    return INSTANCE;
+  }
+
+  public synchronized Collection<HadoopCluster> getServers() {
+    return Collections.unmodifiableCollection(servers.values());
+  }
+
+  /**
+   * Load all available locations from the workspace configuration directory.
+   */
+  private synchronized void load() {
+    Map<String, HadoopCluster> map = new TreeMap<String, HadoopCluster>();
+    for (File file : saveDir.listFiles()) {
+      try {
+        HadoopCluster server = new HadoopCluster(file);
+        map.put(server.getLocationName(), server);
+
+      } catch (Exception exn) {
+        System.err.println(exn);
+      }
+    }
+    this.servers = map;
+  }
+
+  private synchronized void store() {
+    try {
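+      // Write everything to a fresh temporary directory first, then swap
+      // directories, so a failed save cannot corrupt the existing files.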
+      File dir = File.createTempFile("locations", "new", baseDir);
+      dir.delete();
+      dir.mkdirs();
+
+      for (HadoopCluster server : servers.values()) {
+        server.storeSettingsToFile(new File(dir, server.getLocationName()
+            + ".xml"));
+      }
+
+      FilenameFilter XMLFilter = new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          String lower = name.toLowerCase();
+          return lower.endsWith(".xml");
+        }
+      };
+
+      File backup = new File(baseDir, "locations.backup");
+      if (backup.exists()) {
+        for (File file : backup.listFiles(XMLFilter))
+          if (!file.delete())
+            throw new IOException("Unable to delete backup location file: "
+                + file);
+        if (!backup.delete())
+          throw new IOException(
+              "Unable to delete backup location directory: " + backup);
+      }
+
+      saveDir.renameTo(backup);
+      dir.renameTo(saveDir);
+
+    } catch (IOException ioe) {
+      ioe.printStackTrace();
+      MessageDialog.openError(null,
+          "Saving configuration of Hadoop locations failed", ioe.toString());
+    }
+  }
+
+  public void dispose() {
+    for (HadoopCluster server : getServers()) {
+      server.dispose();
+    }
+  }
+
+  public synchronized HadoopCluster getServer(String location) {
+    return servers.get(location);
+  }
+
+  /*
+   * HadoopServer map listeners
+   */
+
+  public void addListener(IHadoopClusterListener l) {
+    synchronized (listeners) {
+      listeners.add(l);
+    }
+  }
+
+  public void removeListener(IHadoopClusterListener l) {
+    synchronized (listeners) {
+      listeners.remove(l);
+    }
+  }
+
+  private void fireListeners(HadoopCluster location, int kind) {
+    synchronized (listeners) {
+      for (IHadoopClusterListener listener : listeners) {
+        listener.serverChanged(location, kind);
+      }
+    }
+  }
+
+  public synchronized void removeServer(HadoopCluster server) {
+    this.servers.remove(server.getLocationName());
+    store();
+    fireListeners(server, SERVER_REMOVED);
+  }
+
+  public synchronized void addServer(HadoopCluster server) {
+    this.servers.put(server.getLocationName(), server);
+    store();
+    fireListeners(server, SERVER_ADDED);
+  }
+
+  /**
+   * Update one Hadoop location
+   * 
+   * @param originalName the name the location had before the update
+   * @param server the location
+   */
+  public synchronized void updateServer(String originalName,
+      HadoopCluster server) {
+
+    // Update the map if the location name has changed
+    if (!server.getLocationName().equals(originalName)) {
+      servers.remove(originalName);
+      servers.put(server.getLocationName(), server);
+    }
+    store();
+    fireListeners(server, SERVER_STATE_CHANGED);
+  }
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
new file mode 100644
index 0000000..71d5559
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/cluster/utils/JarModule.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.cluster.utils;
+
+import java.io.File;
+import java.util.logging.Logger;
+
+import org.apache.hdt.ui.Activator;
+import org.apache.hdt.ui.dialogs.ErrorMessageDialog;
+import org.eclipse.core.resources.IResource;
+import org.eclipse.core.runtime.IProgressMonitor;
+import org.eclipse.core.runtime.Path;
+import org.eclipse.jdt.core.ICompilationUnit;
+import org.eclipse.jdt.core.IJavaElement;
+import org.eclipse.jdt.core.IType;
+import org.eclipse.jdt.ui.jarpackager.IJarExportRunnable;
+import org.eclipse.jdt.ui.jarpackager.JarPackageData;
+import org.eclipse.jface.operation.IRunnableWithProgress;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.ui.PlatformUI;
+
+/**
+ * Methods for interacting with the jar file containing the
+ * Mapper/Reducer/Driver classes for a MapReduce job.
+ */
+
+public class JarModule implements IRunnableWithProgress {
+
+  static Logger log = Logger.getLogger(JarModule.class.getName());
+
+  private IResource resource;
+
+  private File jarFile;
+
+  public JarModule(IResource resource) {
+    this.resource = resource;
+  }
+
+  public String getName() {
+    return resource.getProject().getName() + "/" + resource.getName();
+  }
+
+  /**
+   * Creates a JAR file containing the resource this module wraps (a Java
+   * class with a main() method) and all associated resources; the result
+   * can be retrieved through {@link #getJarFile()}
+   * 
+   * @param monitor the progress monitor
+   */
+  public void run(IProgressMonitor monitor) {
+
+    log.fine("Build jar");
+    JarPackageData jarrer = new JarPackageData();
+
+    jarrer.setExportJavaFiles(true);
+    jarrer.setExportClassFiles(true);
+    jarrer.setExportOutputFolders(true);
+    jarrer.setOverwrite(true);
+
+    try {
+      // IJavaProject project =
+      // (IJavaProject) resource.getProject().getNature(JavaCore.NATURE_ID);
+
+      // check this is the case before letting this method get called
+      Object element = resource.getAdapter(IJavaElement.class);
+      IType type = ((ICompilationUnit) element).findPrimaryType();
+      jarrer.setManifestMainClass(type);
+
+      // Create a temporary JAR file name
+      File baseDir = Activator.getDefault().getStateLocation().toFile();
+
+      String prefix =
+          String.format("%s_%s-", resource.getProject().getName(), resource
+              .getName());
+      File jarFile = File.createTempFile(prefix, ".jar", baseDir);
+      jarrer.setJarLocation(new Path(jarFile.getAbsolutePath()));
+
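+      // Export every file member of the project; together with the manifest
+      // main class set above, this yields a JAR Hadoop can run directly.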
+      jarrer.setElements(resource.getProject().members(IResource.FILE));
+      IJarExportRunnable runnable =
+          jarrer.createJarExportRunnable(Display.getDefault()
+              .getActiveShell());
+      runnable.run(monitor);
+
+      this.jarFile = jarFile;
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Allow the retrieval of the resulting JAR file
+   * 
+   * @return the generated JAR file
+   */
+  public File getJarFile() {
+    return this.jarFile;
+  }
+
+  /**
+   * Static helper that creates a JAR package for the given resource while
+   * showing a progress bar
+   * 
+   * @param resource the resource to package
+   * @return the created JAR file, or null if the packaging failed
+   */
+  public static File createJarPackage(IResource resource) {
+
+    JarModule jarModule = new JarModule(resource);
+    try {
+      PlatformUI.getWorkbench().getProgressService().run(false, true,
+          jarModule);
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      return null;
+    }
+
+    File jarFile = jarModule.getJarFile();
+    if (jarFile == null) {
+      ErrorMessageDialog.display("Run on Hadoop",
+          "Unable to create or locate the JAR file for the Job");
+      return null;
+    }
+
+    return jarFile;
+  }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
new file mode 100644
index 0000000..bb0137a
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/dialogs/ErrorMessageDialog.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.dialogs;
+
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.swt.widgets.Display;
+
+/**
+ * Error dialog helper
+ */
+public class ErrorMessageDialog {
+
+  public static void display(final String title, final String message) {
+    Display.getDefault().syncExec(new Runnable() {
+
+      public void run() {
+        MessageDialog.openError(Display.getDefault().getActiveShell(),
+            title, message);
+      }
+
+    });
+  }
+
+  public static void display(Exception e) {
+    display("An exception has occured!", "Exception description:\n"
+        + e.getLocalizedMessage());
+  }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
new file mode 100644
index 0000000..6b81b8d
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/views/ClusterView.java
@@ -0,0 +1,460 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.views;
+
+import java.util.Collection;
+
+import org.apache.hdt.ui.ImageLibrary;
+import org.apache.hdt.ui.actions.EditLocationAction;
+import org.apache.hdt.ui.actions.NewLocationAction;
+import org.apache.hdt.ui.cluster.HadoopJob;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.ui.cluster.IJobListener;
+import org.apache.hdt.ui.cluster.utils.JarModule;
+import org.apache.hdt.ui.cluster.IHadoopClusterListener;
+import org.apache.hdt.ui.cluster.ServerRegistry;
+import org.eclipse.jface.action.Action;
+import org.eclipse.jface.action.IAction;
+import org.eclipse.jface.action.IMenuListener;
+import org.eclipse.jface.action.IMenuManager;
+import org.eclipse.jface.action.MenuManager;
+import org.eclipse.jface.dialogs.MessageDialog;
+import org.eclipse.jface.viewers.ILabelProviderListener;
+import org.eclipse.jface.viewers.ISelection;
+import org.eclipse.jface.viewers.ISelectionChangedListener;
+import org.eclipse.jface.viewers.IStructuredSelection;
+import org.eclipse.jface.viewers.ITableLabelProvider;
+import org.eclipse.jface.viewers.ITreeContentProvider;
+import org.eclipse.jface.viewers.ITreeSelection;
+import org.eclipse.jface.viewers.SelectionChangedEvent;
+import org.eclipse.jface.viewers.TreeViewer;
+import org.eclipse.jface.viewers.Viewer;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Menu;
+import org.eclipse.swt.widgets.Tree;
+import org.eclipse.swt.widgets.TreeColumn;
+import org.eclipse.ui.IViewSite;
+import org.eclipse.ui.PartInitException;
+import org.eclipse.ui.actions.ActionFactory;
+import org.eclipse.ui.part.ViewPart;
+
+/**
+ * Map/Reduce locations view: displays all available Hadoop locations and the
+ * Jobs running/finished on these locations
+ */
+public class ClusterView extends ViewPart implements ITreeContentProvider,
+    ITableLabelProvider, IJobListener, IHadoopClusterListener {
+
+  /**
+   * Deletion action: delete a Hadoop location, kill a running job or remove
+   * a finished job entry
+   */
+  class DeleteAction extends Action {
+
+    DeleteAction() {
+      setText("Delete");
+      setImageDescriptor(ImageLibrary.get("server.view.action.delete"));
+    }
+
+    /* @inheritDoc */
+    @Override
+    public void run() {
+      ISelection selection =
+          getViewSite().getSelectionProvider().getSelection();
+      if ((selection != null) && (selection instanceof IStructuredSelection)) {
+        Object selItem =
+            ((IStructuredSelection) selection).getFirstElement();
+
+        if (selItem instanceof HadoopCluster) {
+          HadoopCluster location = (HadoopCluster) selItem;
+          if (MessageDialog.openConfirm(Display.getDefault()
+              .getActiveShell(), "Confirm delete Hadoop location",
+              "Do you really want to remove the Hadoop location: "
+                  + location.getLocationName())) {
+            ServerRegistry.getInstance().removeServer(location);
+          }
+
+        } else if (selItem instanceof HadoopJob) {
+
+          // kill the job
+          HadoopJob job = (HadoopJob) selItem;
+          if (job.isCompleted()) {
+            // Job already finished, remove the entry
+            job.getLocation().purgeJob(job);
+
+          } else {
+            // Job is running, kill the job?
+            if (MessageDialog.openConfirm(Display.getDefault()
+                .getActiveShell(), "Confirm kill running Job",
+                "Do you really want to kill running Job: " + job.getJobID())) {
+              job.kill();
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * This object is the root content for this content provider
+   */
+  private static final Object CONTENT_ROOT = new Object();
+
+  private final IAction deleteAction = new DeleteAction();
+
+  private final IAction editServerAction = new EditLocationAction(this);
+
+  private final IAction newLocationAction = new NewLocationAction();
+
+  private TreeViewer viewer;
+
+  public ClusterView() {
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void init(IViewSite site) throws PartInitException {
+    super.init(site);
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void dispose() {
+    ServerRegistry.getInstance().removeListener(this);
+  }
+
+  /**
+   * Creates the columns for the view
+   */
+  @Override
+  public void createPartControl(Composite parent) {
+    Tree main =
+        new Tree(parent, SWT.SINGLE | SWT.FULL_SELECTION | SWT.H_SCROLL
+            | SWT.V_SCROLL);
+    main.setHeaderVisible(true);
+    main.setLinesVisible(false);
+    main.setLayoutData(new GridData(GridData.FILL_BOTH));
+
+    TreeColumn serverCol = new TreeColumn(main, SWT.SINGLE);
+    serverCol.setText("Location");
+    serverCol.setWidth(300);
+    serverCol.setResizable(true);
+
+    TreeColumn locationCol = new TreeColumn(main, SWT.SINGLE);
+    locationCol.setText("Master node");
+    locationCol.setWidth(185);
+    locationCol.setResizable(true);
+
+    TreeColumn stateCol = new TreeColumn(main, SWT.SINGLE);
+    stateCol.setText("State");
+    stateCol.setWidth(95);
+    stateCol.setResizable(true);
+
+    TreeColumn statusCol = new TreeColumn(main, SWT.SINGLE);
+    statusCol.setText("Status");
+    statusCol.setWidth(300);
+    statusCol.setResizable(true);
+
+    viewer = new TreeViewer(main);
+    viewer.setContentProvider(this);
+    viewer.setLabelProvider(this);
+    viewer.setInput(CONTENT_ROOT); // don't care
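+    // CONTENT_ROOT is only a placeholder input: getElements() ignores it
+    // and always reads the locations from the ServerRegistry.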
+
+    getViewSite().setSelectionProvider(viewer);
+    
+    getViewSite().getActionBars().setGlobalActionHandler(
+        ActionFactory.DELETE.getId(), deleteAction);
+    getViewSite().getActionBars().getToolBarManager().add(editServerAction);
+    getViewSite().getActionBars().getToolBarManager().add(newLocationAction);
+
+    createActions();
+    createContextMenu();
+  }
+
+  /**
+   * Actions
+   */
+  private void createActions() {
+    /*
+     * addItemAction = new Action("Add...") { public void run() { addItem(); } };
+     * addItemAction.setImageDescriptor(ImageLibrary
+     * .get("server.view.location.new"));
+     */
+    /*
+     * deleteItemAction = new Action("Delete") { public void run() {
+     * deleteItem(); } };
+     * deleteItemAction.setImageDescriptor(getImageDescriptor("delete.gif"));
+     * 
+     * selectAllAction = new Action("Select All") { public void run() {
+     * selectAll(); } };
+     */
+    // Add selection listener.
+    viewer.addSelectionChangedListener(new ISelectionChangedListener() {
+      public void selectionChanged(SelectionChangedEvent event) {
+        updateActionEnablement();
+      }
+    });
+  }
+
+  private void addItem() {
+    // Placeholder: addItemAction is not wired up yet (see createActions())
+    System.out.printf("ADD ITEM\n");
+  }
+
+  private void updateActionEnablement() {
+    IStructuredSelection sel = (IStructuredSelection) viewer.getSelection();
+    // Enablement rules are not implemented yet; sel is kept for when they are
+    // deleteItemAction.setEnabled(sel.size() > 0);
+  }
+
+  /**
+   * Contextual menu
+   */
+  private void createContextMenu() {
+    // Create menu manager.
+    MenuManager menuMgr = new MenuManager();
+    menuMgr.setRemoveAllWhenShown(true);
+    menuMgr.addMenuListener(new IMenuListener() {
+      public void menuAboutToShow(IMenuManager mgr) {
+        fillContextMenu(mgr);
+      }
+    });
+
+    // Create menu.
+    Menu menu = menuMgr.createContextMenu(viewer.getControl());
+    viewer.getControl().setMenu(menu);
+
+    // Register menu for extension.
+    getSite().registerContextMenu(menuMgr, viewer);
+  }
+
+  private void fillContextMenu(IMenuManager mgr) {
+    mgr.add(newLocationAction);
+    mgr.add(editServerAction);
+    mgr.add(deleteAction);
+    /*
+     * mgr.add(new GroupMarker(IWorkbenchActionConstants.MB_ADDITIONS));
+     * mgr.add(deleteItemAction); mgr.add(new Separator());
+     * mgr.add(selectAllAction);
+     */
+  }
+
+  /* @inheritDoc */
+  @Override
+  public void setFocus() {
+
+  }
+
+  /*
+   * IHadoopServerListener implementation
+   */
+
+  /* @inheritDoc */
+  public void serverChanged(HadoopCluster location, int type) {
+    Display.getDefault().syncExec(new Runnable() {
+      public void run() {
+        ClusterView.this.viewer.refresh();
+      }
+    });
+  }
+
+  /*
+   * IStructuredContentProvider implementation
+   */
+
+  /* @inheritDoc */
+  public void inputChanged(final Viewer viewer, Object oldInput,
+      Object newInput) {
+    if (oldInput == CONTENT_ROOT)
+      ServerRegistry.getInstance().removeListener(this);
+    if (newInput == CONTENT_ROOT)
+      ServerRegistry.getInstance().addListener(this);
+  }
+
+  /**
+   * The root elements displayed by this view are the existing Hadoop
+   * locations
+   */
+  /* @inheritDoc */
+  public Object[] getElements(Object inputElement) {
+    return ServerRegistry.getInstance().getServers().toArray();
+  }
+
+  /*
+   * ITreeStructuredContentProvider implementation
+   */
+
+  /**
+   * Each location contains a child entry for each job it runs.
+   */
+  /* @inheritDoc */
+  public Object[] getChildren(Object parent) {
+
+    if (parent instanceof HadoopCluster) {
+      HadoopCluster location = (HadoopCluster) parent;
+      // Register lazily for job updates the first time a location is expanded
+      location.addJobListener(this);
+      Collection<HadoopJob> jobs = location.getJobs();
+      return jobs.toArray();
+    }
+
+    // The tree content provider contract expects an empty array, not null
+    return new Object[0];
+  }
+
+  /* @inheritDoc */
+  public Object getParent(Object element) {
+    if (element instanceof HadoopCluster) {
+      return CONTENT_ROOT;
+
+    } else if (element instanceof HadoopJob) {
+      return ((HadoopJob) element).getLocation();
+    }
+
+    return null;
+  }
+
+  /* @inheritDoc */
+  public boolean hasChildren(Object element) {
+    /* Only server entries have children */
+    return (element instanceof HadoopCluster);
+  }
+
+  /*
+   * ITableLabelProvider implementation
+   */
+
+  /* @inheritDoc */
+  public void addListener(ILabelProviderListener listener) {
+    // no listeners handling
+  }
+
+  public boolean isLabelProperty(Object element, String property) {
+    return false;
+  }
+
+  /* @inheritDoc */
+  public void removeListener(ILabelProviderListener listener) {
+    // no listener handling
+  }
+
+  /* @inheritDoc */
+  public Image getColumnImage(Object element, int columnIndex) {
+    if ((columnIndex == 0) && (element instanceof HadoopCluster)) {
+      return ImageLibrary.getImage("server.view.location.entry");
+
+    } else if ((columnIndex == 0) && (element instanceof HadoopJob)) {
+      return ImageLibrary.getImage("server.view.job.entry");
+    }
+    return null;
+  }
+
+  /* @inheritDoc */
+  public String getColumnText(Object element, int columnIndex) {
+    if (element instanceof HadoopCluster) {
+      HadoopCluster server = (HadoopCluster) element;
+
+      switch (columnIndex) {
+        case 0:
+          return server.getLocationName();
+        case 1:
+          return server.getMasterHostName().toString();
+        case 2:
+          return server.getState();
+        case 3:
+          return "";
+      }
+    } else if (element instanceof HadoopJob) {
+      HadoopJob job = (HadoopJob) element;
+
+      switch (columnIndex) {
+        case 0:
+          return job.getJobID().toString();
+        case 1:
+          return "";
+        case 2:
+          return job.getState().toString();
+        case 3:
+          return job.getStatus();
+      }
+    } else if (element instanceof JarModule) {
+      JarModule jar = (JarModule) element;
+
+      switch (columnIndex) {
+        case 0:
+          return jar.toString();
+        case 1:
+          return "Publishing jar to server..";
+        case 2:
+          return "";
+      }
+    }
+
+    return null;
+  }
+
+  /*
+   * IJobListener (Map/Reduce Jobs listener) implementation
+   */
+
+  /**
+   * Job and publish notifications may originate from background threads;
+   * marshal every viewer refresh onto the SWT display thread, as
+   * serverChanged() already does.
+   */
+  private void refreshView(final Object element) {
+    Display.getDefault().syncExec(new Runnable() {
+      public void run() {
+        if (element == null)
+          viewer.refresh();
+        else
+          viewer.refresh(element);
+      }
+    });
+  }
+
+  /* @inheritDoc */
+  public void jobAdded(HadoopJob job) {
+    refreshView(null);
+  }
+
+  /* @inheritDoc */
+  public void jobRemoved(HadoopJob job) {
+    refreshView(null);
+  }
+
+  /* @inheritDoc */
+  public void jobChanged(HadoopJob job) {
+    refreshView(job);
+  }
+
+  /* @inheritDoc */
+  public void publishDone(JarModule jar) {
+    refreshView(null);
+  }
+
+  /* @inheritDoc */
+  public void publishStart(JarModule jar) {
+    refreshView(null);
+  }
+
+  /*
+   * Miscellaneous
+   */
+
+  /**
+   * Return the currently selected server (null if there is no selection or
+   * if the selection is not a server)
+   * 
+   * @return the currently selected server entry
+   */
+  public HadoopCluster getSelectedServer() {
+    ITreeSelection selection = (ITreeSelection) viewer.getSelection();
+    Object first = selection.getFirstElement();
+    if (first instanceof HadoopCluster) {
+      return (HadoopCluster) first;
+    }
+    return null;
+  }
+
+}
diff --git a/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java b/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java
new file mode 100644
index 0000000..7079c37
--- /dev/null
+++ b/org.apache.hdt.ui/src/org/apache/hdt/ui/wizards/HadoopLocationWizard.java
@@ -0,0 +1,973 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hdt.ui.wizards;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hdt.ui.cluster.ConfProp;
+import org.apache.hdt.ui.cluster.HadoopCluster;
+import org.apache.hdt.ui.cluster.ServerRegistry;
+import org.eclipse.jface.dialogs.IMessageProvider;
+import org.eclipse.jface.wizard.WizardPage;
+import org.eclipse.swt.SWT;
+import org.eclipse.swt.custom.ScrolledComposite;
+import org.eclipse.swt.events.ModifyEvent;
+import org.eclipse.swt.events.ModifyListener;
+import org.eclipse.swt.events.SelectionEvent;
+import org.eclipse.swt.events.SelectionListener;
+import org.eclipse.swt.graphics.Image;
+import org.eclipse.swt.layout.GridData;
+import org.eclipse.swt.layout.GridLayout;
+import org.eclipse.swt.widgets.Button;
+import org.eclipse.swt.widgets.Composite;
+import org.eclipse.swt.widgets.Control;
+import org.eclipse.swt.widgets.Display;
+import org.eclipse.swt.widgets.Event;
+import org.eclipse.swt.widgets.Group;
+import org.eclipse.swt.widgets.Label;
+import org.eclipse.swt.widgets.Listener;
+import org.eclipse.swt.widgets.TabFolder;
+import org.eclipse.swt.widgets.TabItem;
+import org.eclipse.swt.widgets.Text;
+
+/**
+ * Wizard for editing the settings of a Hadoop location
+ * 
+ * The wizard contains two tabs: General and Advanced. It edits the
+ * parameters of the location member, which is either a new location or a
+ * copy of an existing registered location.
+ */
+
+public class HadoopLocationWizard extends WizardPage {
+
+  /** Tab icon; currently never initialized, so tabs display no image */
+  Image circle;
+
+  /**
+   * The location effectively edited by the wizard: either a copy of an
+   * existing registered location or a brand new one.
+   */
+  private HadoopCluster location;
+
+  /**
+   * The original location being edited by the wizard (null if we create a
+   * new instance).
+   */
+  private HadoopCluster original;
+
+  /**
+   * New Hadoop location wizard
+   */
+  public HadoopLocationWizard() {
+    super("Hadoop Server", "New Hadoop Location", null);
+
+    this.original = null;
+    this.location = new HadoopCluster();
+    this.location.setLocationName("");
+  }
+
+  /**
+   * Constructor to edit the parameters of an existing Hadoop server
+   * 
+   * @param server
+   */
+  public HadoopLocationWizard(HadoopCluster server) {
+    super("Create a new Hadoop location", "Edit Hadoop Location", null);
+
+    this.original = server;
+    this.location = new HadoopCluster(server);
+  }
+
+  /**
+   * Performs any actions appropriate in response to the user having pressed
+   * the Finish button, or refuses if finishing now is not permitted.
+   * 
+   * @return the created or updated Hadoop location, or null on failure
+   */
+
+  public HadoopCluster performFinish() {
+    try {
+      if (this.original == null) {
+        // New location
+        Display.getDefault().syncExec(new Runnable() {
+          public void run() {
+            ServerRegistry.getInstance().addServer(
+                HadoopLocationWizard.this.location);
+          }
+        });
+        return this.location;
+
+      } else {
+        // Update location
+        final String originalName = this.original.getLocationName();
+        this.original.load(this.location);
+
+        Display.getDefault().syncExec(new Runnable() {
+          public void run() {
+            ServerRegistry.getInstance().updateServer(originalName,
+                HadoopLocationWizard.this.location);
+          }
+        });
+        return this.original;
+
+      }
+    } catch (Exception e) {
+      e.printStackTrace();
+      setMessage("Invalid server location values", IMessageProvider.ERROR);
+      return null;
+    }
+  }
+
+  /**
+   * Validates the current Hadoop location settings (looks for the Hadoop
+   * installation directory).
+   * 
+   */
+  private void testLocation() {
+    setMessage("Not implemented yet", IMessageProvider.WARNING);
+  }
+
+  /**
+   * Location is not complete (and finish button not available) until a host
+   * name is specified.
+   * 
+   * @inheritDoc
+   */
+  @Override
+  public boolean isPageComplete() {
+
+    {
+      String locName = location.getConfProp(ConfProp.PI_LOCATION_NAME);
+      if ((locName == null) || (locName.length() == 0)
+          || locName.contains("/")) {
+
+        setMessage("Bad location name: "
+            + "the location name should not contain "
+            + "any character prohibited in a file name.", WARNING);
+
+        return false;
+      }
+    }
+
+    {
+      String master = location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+      if ((master == null) || (master.length() == 0)) {
+
+        setMessage("Bad master host name: "
+            + "the master host name refers to the machine "
+            + "that runs the Job tracker.", WARNING);
+
+        return false;
+      }
+    }
+
+    {
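+      // The job tracker address must look like "host:port" with
+      // 0 <= port < 65536, e.g. "localhost:9001"; "localhost" alone or
+      // "localhost:abc" is rejected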
+      String jobTracker = location.getConfProp(ConfProp.JOB_TRACKER_URI);
+      String[] strs = jobTracker.split(":");
+      boolean ok = (strs.length == 2);
+      if (ok) {
+        try {
+          int port = Integer.parseInt(strs[1]);
+          ok = (port >= 0) && (port < 65536);
+        } catch (NumberFormatException nfe) {
+          ok = false;
+        }
+      }
+      if (!ok) {
+        setMessage("The job tracker information ("
+            + ConfProp.JOB_TRACKER_URI.name + ") is invalid. "
+            + "This usually looks like \"host:port\"", WARNING);
+        return false;
+      }
+    }
+
+    {
+      String fsDefaultURI = location.getConfProp(ConfProp.FS_DEFAULT_URI);
+      try {
+        new URI(fsDefaultURI);
+      } catch (URISyntaxException e) {
+
+        setMessage("The default file system URI is invalid. "
+            + "This usually looks like \"hdfs://host:port/\" "
+            + "or \"file:///dir/\"", WARNING);
+        // Fail validation here, otherwise the message set above is
+        // immediately overwritten and the page reported complete
+        return false;
+      }
+    }
+
+    setMessage("Define the location of a Hadoop infrastructure "
+        + "for running MapReduce applications.");
+    return true;
+  }
+
+  /**
+   * Create the wizard
+   */
+  /* @inheritDoc */
+  public void createControl(Composite parent) {
+    setTitle("Define Hadoop location");
+    setDescription("Define the location of a Hadoop infrastructure "
+        + "for running MapReduce applications.");
+
+    Composite panel = new Composite(parent, SWT.FILL);
+    GridLayout glayout = new GridLayout(2, false);
+    panel.setLayout(glayout);
+
+    TabMediator mediator = new TabMediator(panel);
+    {
+      GridData gdata = new GridData(GridData.FILL_BOTH);
+      gdata.horizontalSpan = 2;
+      mediator.folder.setLayoutData(gdata);
+    }
+    this.setControl(panel /* mediator.folder */);
+    {
+      final Button btn = new Button(panel, SWT.NONE);
+      btn.setText("&Load from file");
+      btn.setEnabled(false);
+      btn.setToolTipText("Not yet implemented");
+      btn.addListener(SWT.Selection, new Listener() {
+        public void handleEvent(Event e) {
+          // TODO
+        }
+      });
+    }
+    {
+      final Button validate = new Button(panel, SWT.NONE);
+      validate.setText("&Validate location");
+      validate.setEnabled(false);
+      validate.setToolTipText("Not yet implemented");
+      validate.addListener(SWT.Selection, new Listener() {
+        public void handleEvent(Event e) {
+          testLocation();
+        }
+      });
+    }
+  }
+
+  private interface TabListener {
+    void notifyChange(ConfProp prop, String propValue);
+  }
+
+  /*
+   * Mediator pattern to keep tabs synchronized with each other and with the
+   * location state.
+   */
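+  /*
+   * Rough flow (a sketch of what follows): a widget change in one tab calls
+   * mediator.notifyChange(sourceTab, prop, value); the mediator stores the
+   * value into the location, re-derives any dependent properties, and
+   * fireChange() echoes the update to every tab except the source.
+   */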
+
+  private class TabMediator {
+    TabFolder folder;
+
+    private Set<TabListener> tabs = new HashSet<TabListener>();
+
+    TabMediator(Composite parent) {
+      folder = new TabFolder(parent, SWT.NONE);
+      tabs.add(new TabMain(this));
+      tabs.add(new TabAdvanced(this));
+    }
+
+    /**
+     * Access to current configuration settings
+     * 
+     * @param propName the property name
+     * @return the current property value
+     */
+    String get(String propName) {
+      return location.getConfProp(propName);
+    }
+
+    String get(ConfProp prop) {
+      return location.getConfProp(prop);
+    }
+
+    /**
+     * Implements change notifications from any tab: update the location
+     * state and other tabs
+     * 
+     * @param source origin of the notification (one of the tabs, or null)
+     * @param prop the modified property
+     * @param propValue the new property value
+     */
+    void notifyChange(TabListener source, final ConfProp prop,
+        final String propValue) {
+      // Ignore notification when no change
+      String oldValue = location.getConfProp(prop);
+      if ((oldValue != null) && oldValue.equals(propValue))
+        return;
+
+      location.setConfProp(prop, propValue);
+      Display.getDefault().syncExec(new Runnable() {
+        public void run() {
+          getContainer().updateButtons();
+        }
+      });
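+      // updateButtons() re-runs isPageComplete(), so validation messages
+      // track every edit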
+
+      this.fireChange(source, prop, propValue);
+
+      /*
+       * Now we deal with dependencies between settings
+       */
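+      // For example, editing PI_JOB_TRACKER_HOST re-derives JOB_TRACKER_URI
+      // as "host:port" and, when the masters are colocated, mirrors the new
+      // host into PI_NAME_NODE_HOST as well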
+      final String jobTrackerHost =
+          location.getConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+      final String jobTrackerPort =
+          location.getConfProp(ConfProp.PI_JOB_TRACKER_PORT);
+      final String nameNodeHost =
+          location.getConfProp(ConfProp.PI_NAME_NODE_HOST);
+      final String nameNodePort =
+          location.getConfProp(ConfProp.PI_NAME_NODE_PORT);
+      final boolean colocate =
+          location.getConfProp(ConfProp.PI_COLOCATE_MASTERS)
+              .equalsIgnoreCase("yes");
+      final String jobTrackerURI =
+          location.getConfProp(ConfProp.JOB_TRACKER_URI);
+      final String fsDefaultURI =
+          location.getConfProp(ConfProp.FS_DEFAULT_URI);
+      final String socksServerURI =
+          location.getConfProp(ConfProp.SOCKS_SERVER);
+      final boolean socksProxyEnable =
+          location.getConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE)
+              .equalsIgnoreCase("yes");
+      final String socksProxyHost =
+          location.getConfProp(ConfProp.PI_SOCKS_PROXY_HOST);
+      final String socksProxyPort =
+          location.getConfProp(ConfProp.PI_SOCKS_PROXY_PORT);
+
+      Display.getDefault().syncExec(new Runnable() {
+        public void run() {
+          switch (prop) {
+            case PI_JOB_TRACKER_HOST: {
+              if (colocate)
+                notifyChange(null, ConfProp.PI_NAME_NODE_HOST,
+                    jobTrackerHost);
+              String newJobTrackerURI =
+                  String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+              notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+              break;
+            }
+            case PI_JOB_TRACKER_PORT: {
+              String newJobTrackerURI =
+                  String.format("%s:%s", jobTrackerHost, jobTrackerPort);
+              notifyChange(null, ConfProp.JOB_TRACKER_URI, newJobTrackerURI);
+              break;
+            }
+            case PI_NAME_NODE_HOST: {
+              String newHDFSURI =
+                  String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+              notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+
+              // Break colocation if someone forces the DFS master host
+              if (!colocate && !nameNodeHost.equals(jobTrackerHost))
+                notifyChange(null, ConfProp.PI_COLOCATE_MASTERS, "no");
+              break;
+            }
+            case PI_NAME_NODE_PORT: {
+              String newHDFSURI =
+                  String.format("hdfs://%s:%s/", nameNodeHost, nameNodePort);
+              notifyChange(null, ConfProp.FS_DEFAULT_URI, newHDFSURI);
+              break;
+            }
+            case PI_SOCKS_PROXY_HOST: {
+              String newSocksProxyURI =
+                  String.format("%s:%s", socksProxyHost, socksProxyPort);
+              notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+              break;
+            }
+            case PI_SOCKS_PROXY_PORT: {
+              String newSocksProxyURI =
+                  String.format("%s:%s", socksProxyHost, socksProxyPort);
+              notifyChange(null, ConfProp.SOCKS_SERVER, newSocksProxyURI);
+              break;
+            }
+            case JOB_TRACKER_URI: {
+              String[] strs = jobTrackerURI.split(":", 2);
+              String host = strs[0];
+              String port = (strs.length == 2) ? strs[1] : "";
+              notifyChange(null, ConfProp.PI_JOB_TRACKER_HOST, host);
+              notifyChange(null, ConfProp.PI_JOB_TRACKER_PORT, port);
+              break;
+            }
+            case FS_DEFAULT_URI: {
+              try {
+                URI uri = new URI(fsDefaultURI);
+                if (uri.getScheme().equals("hdfs")) {
+                  String host = uri.getHost();
+                  String port = Integer.toString(uri.getPort());
+                  notifyChange(null, ConfProp.PI_NAME_NODE_HOST, host);
+                  notifyChange(null, ConfProp.PI_NAME_NODE_PORT, port);
+                }
+              } catch (URISyntaxException use) {
+                // Ignore the update!
+              }
+              break;
+            }
+            case SOCKS_SERVER: {
+              String[] strs = socksServerURI.split(":", 2);
+              String host = strs[0];
+              String port = (strs.length == 2) ? strs[1] : "";
+              notifyChange(null, ConfProp.PI_SOCKS_PROXY_HOST, host);
+              notifyChange(null, ConfProp.PI_SOCKS_PROXY_PORT, port);
+              break;
+            }
+            case PI_COLOCATE_MASTERS: {
+              if (colocate)
+                notifyChange(null, ConfProp.PI_NAME_NODE_HOST,
+                    jobTrackerHost);
+              break;
+            }
+            case PI_SOCKS_PROXY_ENABLE: {
+              if (socksProxyEnable) {
+                notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT,
+                    "org.apache.hadoop.net.SocksSocketFactory");
+              } else {
+                notifyChange(null, ConfProp.SOCKET_FACTORY_DEFAULT,
+                    "org.apache.hadoop.net.StandardSocketFactory");
+              }
+              break;
+            }
+          }
+        }
+      });
+
+    }
+
+    /**
+     * Change notifications on properties (by name). A property might not be
+     * reflected as a ConfProp enum. If it is, the notification is forwarded
+     * to the ConfProp notifyChange method. If not, it is processed here.
+     * 
+     * @param source origin of the notification (may be null)
+     * @param propName name of the modified property
+     * @param propValue the new property value
+     */
+    void notifyChange(TabListener source, String propName, String propValue) {
+
+      ConfProp prop = ConfProp.getByName(propName);
+      if (prop != null)
+        notifyChange(source, prop, propValue);
+      else
+        // Not a known ConfProp: store the raw property directly
+        location.setConfProp(propName, propValue);
+    }
+
+    /**
+     * Broadcast a property change to all registered tabs. If a tab is
+     * identified as the source of the change, this tab will not be notified.
+     * 
+     * @param source the tab at the origin of the change (it will not be
+     *          notified back)
+     * @param prop the modified property
+     * @param value the new property value
+     */
+    private void fireChange(TabListener source, ConfProp prop, String value) {
+      for (TabListener tab : tabs) {
+        if (tab != source)
+          tab.notifyChange(prop, value);
+      }
+    }
+
+  }
+
+  /**
+   * Create a SWT Text component for the given {@link ConfProp} text
+   * configuration property.
+   * 
+   * @param listener the modify listener to attach to the field
+   * @param parent the SWT parent container
+   * @param prop the property edited by this field
+   * @return the new SWT Text field
+   */
+  private Text createConfText(ModifyListener listener, Composite parent,
+      ConfProp prop) {
+
+    Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+    GridData data = new GridData(GridData.FILL_HORIZONTAL);
+    text.setLayoutData(data);
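+    // Stash the property on the widget; modify listeners read it back via
+    // getData("hProp") to route changes to the mediator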
+    text.setData("hProp", prop);
+    text.setText(location.getConfProp(prop));
+    text.addModifyListener(listener);
+
+    return text;
+  }
+
+  /**
+   * Create a SWT Checked Button component for the given {@link ConfProp}
+   * boolean configuration property.
+   * 
+   * @param listener the selection listener to attach to the button
+   * @param parent the SWT parent container
+   * @param prop the boolean property edited by this button
+   * @param text the button label
+   * @return the new SWT check Button
+   */
+  private Button createConfCheckButton(SelectionListener listener,
+      Composite parent, ConfProp prop, String text) {
+
+    Button button = new Button(parent, SWT.CHECK);
+    button.setText(text);
+    button.setData("hProp", prop);
+    button.setSelection(location.getConfProp(prop).equalsIgnoreCase("yes"));
+    button.addSelectionListener(listener);
+
+    return button;
+  }
+
+  /**
+   * Create an editor entry for the given configuration property. The editor
+   * is a (Label, Text) pair.
+   * 
+   * @param listener the listener to trigger on property change
+   * @param parent the SWT parent container
+   * @param prop the property to create an editor for
+   * @param labelText a label (null defaults to the property name)
+   * 
+   * @return a SWT Text field
+   */
+  private Text createConfLabelText(ModifyListener listener,
+      Composite parent, ConfProp prop, String labelText) {
+
+    Label label = new Label(parent, SWT.NONE);
+    if (labelText == null)
+      labelText = prop.name;
+    label.setText(labelText);
+
+    return createConfText(listener, parent, prop);
+  }
+
+  /**
+   * Create an editor entry for the given configuration name
+   * 
+   * @param listener the listener to trigger on property change
+   * @param parent the SWT parent container
+   * @param propName the name of the property to create an editor for
+   * @param labelText a label (null defaults to the property name)
+   * 
+   * @return a SWT Text field
+   */
+  private Text createConfNameEditor(ModifyListener listener,
+      Composite parent, String propName, String labelText) {
+
+    {
+      ConfProp prop = ConfProp.getByName(propName);
+      if (prop != null)
+        return createConfLabelText(listener, parent, prop, labelText);
+    }
+
+    Label label = new Label(parent, SWT.NONE);
+    if (labelText == null)
+      labelText = propName;
+    label.setText(labelText);
+
+    Text text = new Text(parent, SWT.SINGLE | SWT.BORDER);
+    GridData data = new GridData(GridData.FILL_HORIZONTAL);
+    text.setLayoutData(data);
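+    // No ConfProp counterpart: stash the raw property name instead;
+    // TabAdvanced.modifyText() reads it back via getData("hPropName")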
+    text.setData("hPropName", propName);
+    text.setText(location.getConfProp(propName));
+    text.addModifyListener(listener);
+
+    return text;
+  }
+
+  /**
+   * Main parameters of the Hadoop location:
+   * <li> host and port of the Map/Reduce master (Job tracker)
+   * <li> host and port of the DFS master (Name node)
+   * <li> SOCKS proxy
+   */
+  private class TabMain implements TabListener, ModifyListener,
+      SelectionListener {
+
+    TabMediator mediator;
+
+    Text locationName;
+
+    Text textJTHost;
+
+    Text textNNHost;
+
+    Button colocateMasters;
+
+    Text textJTPort;
+
+    Text textNNPort;
+
+    Text userName;
+
+    Button useSocksProxy;
+
+    Text socksProxyHost;
+
+    Text socksProxyPort;
+
+    TabMain(TabMediator mediator) {
+      this.mediator = mediator;
+      TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+      tab.setText("General");
+      tab.setToolTipText("General location parameters");
+      tab.setImage(circle);
+      tab.setControl(createControl(mediator.folder));
+    }
+
+    private Control createControl(Composite parent) {
+
+      Composite panel = new Composite(parent, SWT.FILL);
+      panel.setLayout(new GridLayout(2, false));
+
+      GridData data;
+
+      /*
+       * Location name
+       */
+      {
+        Composite subpanel = new Composite(panel, SWT.FILL);
+        subpanel.setLayout(new GridLayout(2, false));
+        data = new GridData();
+        data.horizontalSpan = 2;
+        data.horizontalAlignment = SWT.FILL;
+        subpanel.setLayoutData(data);
+
+        locationName =
+            createConfLabelText(this, subpanel, ConfProp.PI_LOCATION_NAME,
+                "&Location name:");
+      }
+
+      /*
+       * Map/Reduce group
+       */
+      {
+        Group groupMR = new Group(panel, SWT.SHADOW_NONE);
+        groupMR.setText("Map/Reduce Master");
+        groupMR.setToolTipText("Address of the Map/Reduce master node "
+            + "(the Job Tracker).");
+        GridLayout layout = new GridLayout(2, false);
+        groupMR.setLayout(layout);
+        data = new GridData();
+        data.verticalAlignment = SWT.FILL;
+        data.horizontalAlignment = SWT.CENTER;
+        data.widthHint = 250;
+        groupMR.setLayoutData(data);
+
+        // Job Tracker host
+        Label label = new Label(groupMR, SWT.NONE);
+        label.setText("Host:");
+        data =
+            new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+        label.setLayoutData(data);
+
+        textJTHost =
+            createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_HOST);
+        data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+        textJTHost.setLayoutData(data);
+
+        // Job Tracker port
+        label = new Label(groupMR, SWT.NONE);
+        label.setText("Port:");
+        data =
+            new GridData(GridData.BEGINNING, GridData.CENTER, false, true);
+        label.setLayoutData(data);
+
+        textJTPort =
+            createConfText(this, groupMR, ConfProp.PI_JOB_TRACKER_PORT);
+        data = new GridData(GridData.FILL, GridData.CENTER, true, true);
+        textJTPort.setLayoutData(data);
+      }
+
+      /*
+       * DFS group
+       */
+      {
+        Group groupDFS = new Group(panel, SWT.SHADOW_NONE);
+        groupDFS.setText("DFS Master");
+        groupDFS.setToolTipText("Address of the Distributed FileSystem "
+            + "master node (the Name Node).");
+        GridLayout layout = new GridLayout(2, false);
+        groupDFS.setLayout(layout);
+        data = new GridData();
+        data.horizontalAlignment = SWT.CENTER;
+        data.widthHint = 250;
+        groupDFS.setLayoutData(data);
+
+        colocateMasters =
+            createConfCheckButton(this, groupDFS,
+                ConfProp.PI_COLOCATE_MASTERS, "Use M/R Master host");
+        data = new GridData();
+        data.horizontalSpan = 2;
+        colocateMasters.setLayoutData(data);
+
+        // Name Node host
+        Label label = new Label(groupDFS, SWT.NONE);
+        data = new GridData();
+        label.setText("Host:");
+        label.setLayoutData(data);
+
+        textNNHost =
+            createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_HOST);
+
+        // Name Node port
+        label = new Label(groupDFS, SWT.NONE);
+        data = new GridData();
+        label.setText("Port:");
+        label.setLayoutData(data);
+
+        textNNPort =
+            createConfText(this, groupDFS, ConfProp.PI_NAME_NODE_PORT);
+      }
+
+      {
+        Composite subpanel = new Composite(panel, SWT.FILL);
+        subpanel.setLayout(new GridLayout(2, false));
+        data = new GridData();
+        data.horizontalSpan = 2;
+        data.horizontalAlignment = SWT.FILL;
+        subpanel.setLayoutData(data);
+
+        userName =
+            createConfLabelText(this, subpanel, ConfProp.PI_USER_NAME,
+                "&User name:");
+      }
+
+      // SOCKS proxy group
+      {
+        Group groupSOCKS = new Group(panel, SWT.SHADOW_NONE);
+        groupSOCKS.setText("SOCKS proxy");
+        groupSOCKS.setToolTipText("Address of the SOCKS proxy to use "
+            + "to connect to the infrastructure.");
+        GridLayout layout = new GridLayout(2, false);
+        groupSOCKS.setLayout(layout);
+        data = new GridData();
+        data.horizontalAlignment = SWT.CENTER;
+        data.horizontalSpan = 2;
+        data.widthHint = 250;
+        groupSOCKS.setLayoutData(data);
+
+        useSocksProxy =
+            createConfCheckButton(this, groupSOCKS,
+                ConfProp.PI_SOCKS_PROXY_ENABLE, "Enable SOCKS proxy");
+        data = new GridData();
+        data.horizontalSpan = 2;
+        useSocksProxy.setLayoutData(data);
+
+        // SOCKS proxy host
+        Label label = new Label(groupSOCKS, SWT.NONE);
+        data = new GridData();
+        label.setText("Host:");
+        label.setLayoutData(data);
+
+        socksProxyHost =
+            createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_HOST);
+
+        // SOCKS proxy port
+        label = new Label(groupSOCKS, SWT.NONE);
+        data = new GridData();
+        label.setText("Port:");
+        label.setLayoutData(data);
+
+        socksProxyPort =
+            createConfText(this, groupSOCKS, ConfProp.PI_SOCKS_PROXY_PORT);
+      }
+
+      // Update the state of all widgets according to the current values!
+      reloadConfProp(ConfProp.PI_COLOCATE_MASTERS);
+      reloadConfProp(ConfProp.PI_SOCKS_PROXY_ENABLE);
+      reloadConfProp(ConfProp.PI_JOB_TRACKER_HOST);
+
+      return panel;
+    }
+
+    /**
+     * Reload the given configuration property value
+     * 
+     * @param prop the property whose current value should be re-displayed
+     */
+    private void reloadConfProp(ConfProp prop) {
+      this.notifyChange(prop, location.getConfProp(prop));
+    }
+
+    public void notifyChange(ConfProp prop, String propValue) {
+      switch (prop) {
+        case PI_JOB_TRACKER_HOST: {
+          textJTHost.setText(propValue);
+          break;
+        }
+        case PI_JOB_TRACKER_PORT: {
+          textJTPort.setText(propValue);
+          break;
+        }
+        case PI_LOCATION_NAME: {
+          locationName.setText(propValue);
+          break;
+        }
+        case PI_USER_NAME: {
+          userName.setText(propValue);
+          break;
+        }
+        case PI_COLOCATE_MASTERS: {
+          if (colocateMasters != null) {
+            boolean colocate = propValue.equalsIgnoreCase("yes");
+            colocateMasters.setSelection(colocate);
+            if (textNNHost != null) {
+              textNNHost.setEnabled(!colocate);
+            }
+          }
+          break;
+        }
+        case PI_NAME_NODE_HOST: {
+          textNNHost.setText(propValue);
+          break;
+        }
+        case PI_NAME_NODE_PORT: {
+          textNNPort.setText(propValue);
+          break;
+        }
+        case PI_SOCKS_PROXY_ENABLE: {
+          if (useSocksProxy != null) {
+            boolean useProxy = propValue.equalsIgnoreCase("yes");
+            useSocksProxy.setSelection(useProxy);
+            if (socksProxyHost != null)
+              socksProxyHost.setEnabled(useProxy);
+            if (socksProxyPort != null)
+              socksProxyPort.setEnabled(useProxy);
+          }
+          break;
+        }
+        case PI_SOCKS_PROXY_HOST: {
+          socksProxyHost.setText(propValue);
+          break;
+        }
+        case PI_SOCKS_PROXY_PORT: {
+          socksProxyPort.setText(propValue);
+          break;
+        }
+      }
+    }
+
+    /* @inheritDoc */
+    public void modifyText(ModifyEvent e) {
+      final Text text = (Text) e.widget;
+      final ConfProp prop = (ConfProp) text.getData("hProp");
+      Display.getDefault().syncExec(new Runnable() {
+        public void run() {
+          mediator.notifyChange(TabMain.this, prop, text.getText());
+        }
+      });
+    }
+
+    /* @inheritDoc */
+    public void widgetDefaultSelected(SelectionEvent e) {
+      this.widgetSelected(e);
+    }
+
+    /* @inheritDoc */
+    public void widgetSelected(SelectionEvent e) {
+      final Button button = (Button) e.widget;
+      final ConfProp prop = (ConfProp) button.getData("hProp");
+
+      Display.getDefault().syncExec(new Runnable() {
+        public void run() {
+          // We want to receive the update also!
+          mediator.notifyChange(null, prop, button.getSelection() ? "yes"
+              : "no");
+        }
+      });
+    }
+
+  }
+
+  private class TabAdvanced implements TabListener, ModifyListener {
+    TabMediator mediator;
+
+    private Composite panel;
+
+    private Map<String, Text> textMap = new TreeMap<String, Text>();
+
+    TabAdvanced(TabMediator mediator) {
+      this.mediator = mediator;
+      TabItem tab = new TabItem(mediator.folder, SWT.NONE);
+      tab.setText("Advanced parameters");
+      tab.setToolTipText("Access to advanced Hadoop parameters");
+      tab.setImage(circle);
+      tab.setControl(createControl(mediator.folder));
+
+    }
+
+    private Control createControl(Composite parent) {
+      ScrolledComposite sc =
+          new ScrolledComposite(parent, SWT.BORDER | SWT.H_SCROLL
+              | SWT.V_SCROLL);
+
+      panel = new Composite(sc, SWT.NONE);
+      sc.setContent(panel);
+
+      sc.setExpandHorizontal(true);
+      sc.setExpandVertical(true);
+
+      sc.setMinSize(640, 480);
+
+      GridLayout layout = new GridLayout();
+      layout.numColumns = 2;
+      layout.makeColumnsEqualWidth = false;
+      panel.setLayout(layout);
+      panel.setLayoutData(new GridData(GridData.FILL, GridData.FILL, true,
+          true, 1, 1));
+
+      // Sort by property name
+      Configuration config = location.getConfiguration();
+      SortedMap<String, String> map = new TreeMap<String, String>();
+      Iterator<Entry<String, String>> it = config.iterator();
+      while (it.hasNext()) {
+        Entry<String, String> entry = it.next();
+        map.put(entry.getKey(), entry.getValue());
+      }
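+      // TreeMap iterates in key order, so the editors below appear sorted
+      // alphabetically by property name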
+
+      for (Entry<String, String> entry : map.entrySet()) {
+        Text text = createConfNameEditor(this, panel, entry.getKey(), null);
+        textMap.put(entry.getKey(), text);
+      }
+
+      sc.setMinSize(panel.computeSize(SWT.DEFAULT, SWT.DEFAULT));
+
+      return sc;
+    }
+
+    public void notifyChange(ConfProp prop, final String propValue) {
+      Text text = textMap.get(prop.name);
+      // Properties that have no editor in this tab are ignored
+      if (text != null)
+        text.setText(propValue);
+    }
+
+    public void modifyText(ModifyEvent e) {
+      final Text text = (Text) e.widget;
+      Object hProp = text.getData("hProp");
+      final ConfProp prop = (hProp != null) ? (ConfProp) hProp : null;
+      Object hPropName = text.getData("hPropName");
+      final String propName =
+          (hPropName != null) ? (String) hPropName : null;
+
+      Display.getDefault().syncExec(new Runnable() {
+        public void run() {
+          if (prop != null)
+            mediator.notifyChange(TabAdvanced.this, prop, text.getText());
+          else
+            mediator
+                .notifyChange(TabAdvanced.this, propName, text.getText());
+        }
+      });
+    }
+  }
+
+}