PIG-2768: Fix org.apache.hadoop.conf.Configuration deprecation warnings for Hadoop 23 (rohini)

git-svn-id: https://svn.apache.org/repos/asf/pig/trunk@1746745 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/CHANGES.txt b/CHANGES.txt
index 012909f..3b27f37 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -23,6 +23,8 @@
 INCOMPATIBLE CHANGES
  
 IMPROVEMENTS
+
+PIG-2768: Fix org.apache.hadoop.conf.Configuration deprecation warnings for Hadoop 23 (rohini)
  
 OPTIMIZATIONS
  
diff --git a/shims/test/hadoop20/org/apache/pig/test/MiniCluster.java b/shims/test/hadoop20/org/apache/pig/test/MiniCluster.java
index c44ed44..2ceeaad 100644
--- a/shims/test/hadoop20/org/apache/pig/test/MiniCluster.java
+++ b/shims/test/hadoop20/org/apache/pig/test/MiniCluster.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.pig.ExecType;
@@ -81,7 +82,7 @@
 
             // Set the system properties needed by Pig
             System.setProperty("cluster", m_conf.get(MRConfiguration.JOB_TRACKER));
-            System.setProperty("namenode", m_conf.get("fs.default.name"));
+            System.setProperty("namenode", m_conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
             System.setProperty("junit.hadoop.conf", CONF_DIR.getPath());
         } catch (IOException e) {
             throw new RuntimeException(e);
diff --git a/shims/test/hadoop23/org/apache/pig/test/MiniCluster.java b/shims/test/hadoop23/org/apache/pig/test/MiniCluster.java
index ac670ab..17bb866 100644
--- a/shims/test/hadoop23/org/apache/pig/test/MiniCluster.java
+++ b/shims/test/hadoop23/org/apache/pig/test/MiniCluster.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
@@ -93,7 +94,7 @@
             m_mr_conf = new Configuration(m_mr.getConfig());
 
             m_conf = m_mr_conf;
-            m_conf.set("fs.default.name", m_dfs_conf.get("fs.default.name"));
+            m_conf.set(FileSystem.FS_DEFAULT_NAME_KEY, m_dfs_conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
             m_conf.unset(MRConfiguration.JOB_CACHE_FILES);
 
             m_conf.setInt(MRConfiguration.IO_SORT_MB, 200);
@@ -110,11 +111,10 @@
                     new Path("/pigtest/conf/hadoop-site.xml"));
             DistributedCache.addFileToClassPath(new Path("/pigtest/conf/hadoop-site.xml"), m_conf);
 
-            System.err.println("XXX: Setting fs.default.name to: " + m_dfs_conf.get("fs.default.name"));
+            System.err.println("XXX: Setting " + FileSystem.FS_DEFAULT_NAME_KEY + " to: " + m_conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
             // Set the system properties needed by Pig
             System.setProperty("cluster", m_conf.get(MRConfiguration.JOB_TRACKER));
-            //System.setProperty("namenode", m_dfs_conf.get("fs.default.name"));
-            System.setProperty("namenode", m_conf.get("fs.default.name"));
+            System.setProperty("namenode", m_conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
             System.setProperty("junit.hadoop.conf", CONF_DIR.getPath());
         } catch (IOException e) {
             throw new RuntimeException(e);
diff --git a/src/org/apache/pig/backend/hadoop/datastorage/HDataStorage.java b/src/org/apache/pig/backend/hadoop/datastorage/HDataStorage.java
index 8161ac2..132cc11 100644
--- a/src/org/apache/pig/backend/hadoop/datastorage/HDataStorage.java
+++ b/src/org/apache/pig/backend/hadoop/datastorage/HDataStorage.java
@@ -18,20 +18,20 @@
 
 package org.apache.pig.backend.hadoop.datastorage;
 
-import java.net.URI;
 import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
 import java.util.Enumeration;
-import java.util.Map;
 import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.pig.PigException;
 import org.apache.pig.backend.datastorage.ContainerDescriptor;
 import org.apache.pig.backend.datastorage.DataStorage;
@@ -40,8 +40,6 @@
 
 public class HDataStorage implements DataStorage {
 
-    private static final String FILE_SYSTEM_LOCATION = "fs.default.name";
-
     private FileSystem fs;
     private Configuration configuration;
     private Properties properties;
@@ -58,9 +56,10 @@
         init();
     }
 
+    @Override
     public void init() {
         // check if name node is set, if not we set local as fail back
-        String nameNode = this.properties.getProperty(FILE_SYSTEM_LOCATION);
+        String nameNode = this.properties.getProperty(FileSystem.FS_DEFAULT_NAME_KEY);
         if (nameNode == null || nameNode.length() == 0) {
             nameNode = "local";
         }
@@ -76,14 +75,17 @@
         }
     }
 
+    @Override
     public void close() throws IOException {
         fs.close();
     }
-    
+
+    @Override
     public Properties getConfiguration() {
         return this.properties;
     }
 
+    @Override
     public void updateConfiguration(Properties newConfiguration)
             throws DataStorageException {
         // TODO sgroschupf 25Feb2008 this method is never called and
@@ -92,38 +94,40 @@
         if (newConfiguration == null) {
             return;
         }
-        
+
         Enumeration<Object> newKeys = newConfiguration.keys();
-        
+
         while (newKeys.hasMoreElements()) {
             String key = (String) newKeys.nextElement();
             String value = null;
-            
+
             value = newConfiguration.getProperty(key);
-            
+
             fs.getConf().set(key,value);
         }
     }
-    
+
+    @Override
     public Map<String, Object> getStatistics() throws IOException {
         Map<String, Object> stats = new HashMap<String, Object>();
 
         long usedBytes = fs.getUsed();
         stats.put(USED_BYTES_KEY , Long.valueOf(usedBytes).toString());
-        
+
         if (fs instanceof DistributedFileSystem) {
             DistributedFileSystem dfs = (DistributedFileSystem) fs;
-            
+
             long rawCapacityBytes = dfs.getRawCapacity();
             stats.put(RAW_CAPACITY_KEY, Long.valueOf(rawCapacityBytes).toString());
-            
+
             long rawUsedBytes = dfs.getRawUsed();
             stats.put(RAW_USED_KEY, Long.valueOf(rawUsedBytes).toString());
         }
-        
+
         return stats;
     }
-    
+
+    @Override
     public ElementDescriptor asElement(String name) throws DataStorageException {
         if (this.isContainer(name)) {
             return new HDirectory(this, name);
@@ -132,70 +136,82 @@
             return new HFile(this, name);
         }
     }
-    
+
+    @Override
     public ElementDescriptor asElement(ElementDescriptor element)
             throws DataStorageException {
         return asElement(element.toString());
     }
-    
+
+    @Override
     public ElementDescriptor asElement(String parent,
-                                                  String child) 
+                                                  String child)
             throws DataStorageException {
         return asElement((new Path(parent, child)).toString());
     }
 
+    @Override
     public ElementDescriptor asElement(ContainerDescriptor parent,
-                                                  String child) 
+                                                  String child)
             throws DataStorageException {
         return asElement(parent.toString(), child);
     }
 
+    @Override
     public ElementDescriptor asElement(ContainerDescriptor parent,
-                                                  ElementDescriptor child) 
+                                                  ElementDescriptor child)
             throws DataStorageException {
         return asElement(parent.toString(), child.toString());
     }
 
-    public ContainerDescriptor asContainer(String name) 
+    @Override
+    public ContainerDescriptor asContainer(String name)
             throws DataStorageException {
         return new HDirectory(this, name);
     }
-    
+
+    @Override
     public ContainerDescriptor asContainer(ContainerDescriptor container)
             throws DataStorageException {
         return new HDirectory(this, container.toString());
     }
-    
+
+    @Override
     public ContainerDescriptor asContainer(String parent,
-                                                      String child) 
+                                                      String child)
             throws DataStorageException {
         return new HDirectory(this, parent, child);
     }
 
+    @Override
     public ContainerDescriptor asContainer(ContainerDescriptor parent,
-                                                      String child) 
+                                                      String child)
             throws DataStorageException {
         return new HDirectory(this, parent.toString(), child);
     }
-    
+
+    @Override
     public ContainerDescriptor asContainer(ContainerDescriptor parent,
                                                       ContainerDescriptor child)
             throws DataStorageException {
         return new HDirectory(this, parent.toString(), child.toString());
     }
-    
+
+    @Override
     public void setActiveContainer(ContainerDescriptor container) {
         fs.setWorkingDirectory(new Path(container.toString()));
     }
-    
+
+    @Override
     public ContainerDescriptor getActiveContainer() {
         return new HDirectory(this, fs.getWorkingDirectory());
    }
 
+    @Override
     public boolean isContainer(String name) throws DataStorageException {
         boolean isContainer = false;
         Path path = new Path(name);
-        
+
         try {
             if ((this.fs.exists(path)) && (! this.fs.isFile(path))) {
                 isContainer = true;
@@ -206,10 +222,11 @@
             String msg = "Unable to check name " + name;
             throw new DataStorageException(msg, errCode, PigException.REMOTE_ENVIRONMENT, e);
         }
-        
+
         return isContainer;
     }
-    
+
+    @Override
     public HPath[] asCollection(String pattern) throws DataStorageException {
         try {
             FileStatus[] paths = this.fs.globStatus(new Path(pattern));
@@ -218,7 +235,7 @@
                 return new HPath[0];
 
             List<HPath> hpaths = new ArrayList<HPath>();
-            
+
             for (int i = 0; i < paths.length; ++i) {
                 HPath hpath = (HPath)this.asElement(paths[i].getPath().toString());
                 if (!hpath.systemElement()) {
@@ -233,7 +250,7 @@
             throw new DataStorageException(msg, errCode, PigException.REMOTE_ENVIRONMENT, e);
         }
     }
-    
+
     public FileSystem getHFS() {
         return fs;
     }

diff --git a/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java b/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java
index b0119f8..01dc78f 100644
--- a/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java
+++ b/src/org/apache/pig/backend/hadoop/executionengine/HExecutionEngine.java
@@ -30,6 +30,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.pig.PigException;
@@ -76,8 +77,6 @@
     public static final String MAPRED_DEFAULT_SITE = "mapred-default.xml";
     public static final String YARN_DEFAULT_SITE = "yarn-default.xml";
 
-    public static final String FILE_SYSTEM_LOCATION = "fs.default.name";
-    public static final String ALTERNATIVE_FILE_SYSTEM_LOCATION = "fs.defaultFS";
     public static final String LOCAL = "local";
 
     protected PigContext pigContext;
@@ -203,8 +202,8 @@
                 properties.setProperty(MRConfiguration.FRAMEWORK_NAME, LOCAL);
             }
             properties.setProperty(MRConfiguration.JOB_TRACKER, LOCAL);
-            properties.setProperty(FILE_SYSTEM_LOCATION, "file:///");
-            properties.setProperty(ALTERNATIVE_FILE_SYSTEM_LOCATION, "file:///");
+            properties.remove("fs.default.name"); //Deprecated in Hadoop 2.x
+            properties.setProperty(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
 
             jc = getLocalConf();
             JobConf s3Jc = getS3Conf();
@@ -220,24 +219,7 @@
         HKerberos.tryKerberosKeytabLogin(jc);
 
         cluster = jc.get(MRConfiguration.JOB_TRACKER);
-        nameNode = jc.get(FILE_SYSTEM_LOCATION);
-        if (nameNode == null) {
-            nameNode = (String) pigContext.getProperties().get(ALTERNATIVE_FILE_SYSTEM_LOCATION);
-        }
-
-        if (cluster != null && cluster.length() > 0) {
-            if (!cluster.contains(":") && !cluster.equalsIgnoreCase(LOCAL)) {
-                cluster = cluster + ":50020";
-            }
-            properties.setProperty(MRConfiguration.JOB_TRACKER, cluster);
-        }
-
-        if (nameNode != null && nameNode.length() > 0) {
-            if (!nameNode.contains(":") && !nameNode.equalsIgnoreCase(LOCAL)) {
-                nameNode = nameNode + ":8020";
-            }
-            properties.setProperty(FILE_SYSTEM_LOCATION, nameNode);
-        }
+        nameNode = jc.get(FileSystem.FS_DEFAULT_NAME_KEY);
 
         LOG.info("Connecting to hadoop file system at: "
                 + (nameNode == null ? LOCAL : nameNode));
diff --git a/src/org/apache/pig/backend/hadoop/executionengine/util/MapRedUtil.java b/src/org/apache/pig/backend/hadoop/executionengine/util/MapRedUtil.java
index 981d21e..b485c56 100644
--- a/src/org/apache/pig/backend/hadoop/executionengine/util/MapRedUtil.java
+++ b/src/org/apache/pig/backend/hadoop/executionengine/util/MapRedUtil.java
@@ -70,7 +70,7 @@
     private static Log log = LogFactory.getLog(MapRedUtil.class);
     private static final TupleFactory tf = TupleFactory.getInstance();
 
-    public static final String FILE_SYSTEM_NAME = "fs.default.name";
+    public static final String FILE_SYSTEM_NAME = FileSystem.FS_DEFAULT_NAME_KEY;
 
     /**
      * Loads the key distribution sampler file
@@ -301,7 +301,7 @@
     /**
      * Returns the total number of bytes for this file, or if a directory all
      * files in the directory.
-     * 
+     *
      * @param fs FileSystem
      * @param status FileStatus
      * @param max Maximum value of total length that will trigger exit. Many