Merge remote-tracking branch 'origin/master' into hadoop-next
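
This merge moves the default toolchain components from the cdh5.11.0
line to the cdh6.x line (Hadoop 3.0.0-alpha1, HBase 2.0.0, Hive 2.1.0)
and adapts the frontend to the Hive 2.x APIs: the generated HiveServer2
thrift classes moved to org.apache.hive.service.rpc.thrift,
MetaStoreUtils.validateName() takes an extra Configuration argument,
and the HMS alter_partition()/alter_partitions() calls take an
EnvironmentContext. The Hadoop 3 line also drops the
BlockStorageLocation/VolumeId API, so the related imports and the
dfs.client.file-block-storage-locations.timeout.millis startup check
go away.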

Change-Id: Ib37056d1f273f6efb5b87c9b48512f8b14ab497b
diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index c9b822d..8b3cc8c 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -294,11 +294,11 @@
   IMPALA_THRIFT_JAVA_VERSION=0.9.2
 fi
 
-export IMPALA_HADOOP_VERSION=${IMPALA_HADOOP_VERSION:-2.6.0-cdh5.11.0-SNAPSHOT}
-export IMPALA_HBASE_VERSION=${IMPALA_HBASE_VERSION:-1.2.0-cdh5.11.0-SNAPSHOT}
-export IMPALA_HIVE_VERSION=${IMPALA_HIVE_VERSION:-1.1.0-cdh5.11.0-SNAPSHOT}
-export IMPALA_SENTRY_VERSION=${IMPALA_SENTRY_VERSION:-1.5.1-cdh5.11.0-SNAPSHOT}
-export IMPALA_PARQUET_VERSION=${IMPALA_PARQUET_VERSION:-1.5.0-cdh5.11.0-SNAPSHOT}
+export IMPALA_HADOOP_VERSION=${IMPALA_HADOOP_VERSION:-3.0.0-alpha1-cdh6.x-SNAPSHOT}
+export IMPALA_HBASE_VERSION=${IMPALA_HBASE_VERSION:-2.0.0-cdh6.x-SNAPSHOT}
+export IMPALA_HIVE_VERSION=${IMPALA_HIVE_VERSION:-2.1.0-cdh6.x-SNAPSHOT}
+export IMPALA_SENTRY_VERSION=${IMPALA_SENTRY_VERSION:-1.5.1-cdh6.x-SNAPSHOT}
+export IMPALA_PARQUET_VERSION=${IMPALA_PARQUET_VERSION:-1.5.0-cdh6.x-SNAPSHOT}
 export IMPALA_LLAMA_MINIKDC_VERSION=${IMPALA_LLAMA_MINIKDC_VERSION:-1.0.0}
 
 export IMPALA_FE_DIR="$IMPALA_HOME/fe"
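
Note: these defaults use the shell's ${VAR:-default} expansion, so each
component version can still be pinned from the environment before
impala-config.sh is sourced; only the fallback values move from the
cdh5.11.0 snapshots to the cdh6.x snapshots.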
diff --git a/common/thrift/TCLIService.thrift b/common/thrift/TCLIService.thrift
index f95e2f8..afed40e 100644
--- a/common/thrift/TCLIService.thrift
+++ b/common/thrift/TCLIService.thrift
@@ -32,7 +32,7 @@
 // * Service names begin with the letter "T", use a capital letter for each
 //   new word (with no underscores), and end with the word "Service".
 
-namespace java org.apache.hive.service.cli.thrift
+namespace java org.apache.hive.service.rpc.thrift
 namespace cpp apache.hive.service.cli.thrift
 
 // List of protocol versions. A new token should be
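
Note: only the java namespace changes here; Hive 2.x repackaged the
generated HiveServer2 thrift classes into a separate service-rpc module,
while the cpp namespace stays apache.hive.service.cli.thrift. The
matching import updates appear in Frontend.java, AuthorizationTest.java
and FrontendTest.java below.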
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
index 8993acb..8fb6875 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
@@ -167,7 +167,7 @@
 
   public void analyze(Analyzer analyzer) throws AnalysisException {
     // Check whether the column name meets the Metastore's requirements.
-    if (!MetaStoreUtils.validateName(colName_)) {
+    if (!MetaStoreUtils.validateName(colName_, null)) {
       throw new AnalysisException("Invalid column/field name: " + colName_);
     }
     if (typeDef_ != null) {
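
This is the first of several call sites (CreateDataSrcStmt, CreateDbStmt,
DropDataSrcStmt, Subquery, TableName, TypeDef, ParserTest below) updated
for the Hive 2.x signature, which takes a Configuration as a second
argument. Passing null appears to keep the old default validation rules.
A minimal sketch of the new call shape (the sample names are made up):

  import org.apache.hadoop.hive.metastore.MetaStoreUtils;

  public class ValidateNameSketch {
    public static void main(String[] args) {
      // Hive 2.x: validateName(String name, Configuration conf). A null
      // Configuration falls back to the built-in identifier pattern,
      // matching how the Impala call sites in this patch use it.
      for (String name : new String[] {"my_col", "bad-col!"}) {
        System.out.println(name + " -> "
            + MetaStoreUtils.validateName(name, null));
      }
    }
  }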
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
index 30ca223..cc788f5 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
@@ -54,7 +54,7 @@
 
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!MetaStoreUtils.validateName(dataSrcName_)) {
+    if (!MetaStoreUtils.validateName(dataSrcName_, null)) {
       throw new AnalysisException("Invalid data source name: " + dataSrcName_);
     }
     if (!ifNotExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) != null) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
index a9fbf20..f603585 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
@@ -82,7 +82,7 @@
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     // Check whether the db name meets the Metastore's requirements.
-    if (!MetaStoreUtils.validateName(dbName_)) {
+    if (!MetaStoreUtils.validateName(dbName_, null)) {
       throw new AnalysisException("Invalid database name: " + dbName_);
     }
 
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
index e3dd9a8..90596db 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
@@ -39,7 +39,7 @@
 
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!MetaStoreUtils.validateName(dataSrcName_) ||
+    if (!MetaStoreUtils.validateName(dataSrcName_, null) ||
         (!ifExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) == null)) {
       throw new AnalysisException(Analyzer.DATA_SRC_DOES_NOT_EXIST_ERROR_MSG +
           dataSrcName_);
diff --git a/fe/src/main/java/org/apache/impala/analysis/Subquery.java b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
index afb839e..8a94a60 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Subquery.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
@@ -133,7 +133,7 @@
       Expr expr = stmtResultExprs.get(i);
       String fieldName = null;
       // Check if the label meets the Metastore's requirements.
-      if (MetaStoreUtils.validateName(labels.get(i))) {
+      if (MetaStoreUtils.validateName(labels.get(i), null)) {
         fieldName = labels.get(i);
         // Make sure the field names are unique.
         if (!hasUniqueLabels) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableName.java b/fe/src/main/java/org/apache/impala/analysis/TableName.java
index 297948e..3e0cc1a 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableName.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableName.java
@@ -54,12 +54,12 @@
    */
   public void analyze() throws AnalysisException {
     if (db_ != null) {
-      if (!MetaStoreUtils.validateName(db_)) {
+      if (!MetaStoreUtils.validateName(db_, null)) {
         throw new AnalysisException("Invalid database name: " + db_);
       }
     }
     Preconditions.checkNotNull(tbl_);
-    if (!MetaStoreUtils.validateName(tbl_)) {
+    if (!MetaStoreUtils.validateName(tbl_, null)) {
       throw new AnalysisException("Invalid table/view name: " + tbl_);
     }
   }
diff --git a/fe/src/main/java/org/apache/impala/analysis/TypeDef.java b/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
index 76e6a8f..62d6539 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
@@ -133,7 +133,7 @@
                 f.getName(), toSql()));
       }
       // Check whether the column name meets the Metastore's requirements.
-      if (!MetaStoreUtils.validateName(f.getName().toLowerCase())) {
+      if (!MetaStoreUtils.validateName(f.getName().toLowerCase(), null)) {
         throw new AnalysisException("Invalid struct field name: " + f.getName());
       }
     }
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 904c90f..79e5072 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -33,7 +33,6 @@
 import org.apache.avro.Schema;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
@@ -48,9 +47,6 @@
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.impala.analysis.ColumnDef;
 import org.apache.impala.analysis.Expr;
 import org.apache.impala.analysis.LiteralExpr;
@@ -71,7 +67,6 @@
 import org.apache.impala.thrift.TColumn;
 import org.apache.impala.thrift.THdfsFileBlock;
 import org.apache.impala.thrift.THdfsPartition;
-import org.apache.impala.thrift.THdfsPartitionLocation;
 import org.apache.impala.thrift.THdfsTable;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.thrift.TPartitionKeyValue;
@@ -90,45 +85,15 @@
 import org.apache.impala.util.MetaStoreUtil;
 import org.apache.impala.util.TAccessLevelUtil;
 import org.apache.impala.util.TResultRowBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
-import org.apache.avro.Schema;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.BlockStorageLocation;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.VolumeId;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
 /**
  * Internal representation of table-related metadata of a file-resident table on a
  * Hadoop filesystem. The table data can be accessed through libHDFS (which is more of
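
Note: besides dropping imports for the BlockStorageLocation API that is
gone in Hadoop 3, this hunk deletes a stale duplicate import block that
had accumulated after the com.google.common imports and regroups the
slf4j imports with the other third-party imports.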
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index f878b12..35c78fa 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -772,8 +772,7 @@
       }
       PartitionStatsUtil.partStatsToParameters(partitionStats, partition);
       partition.putToParameters(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
-      partition.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
-          StatsSetupConst.TRUE);
+      partition.putToParameters(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
       ++numTargetedPartitions;
       modifiedParts.add(partition);
     }
@@ -791,8 +790,7 @@
     // Update the table's ROW_COUNT parameter.
     msTbl.putToParameters(StatsSetupConst.ROW_COUNT,
         String.valueOf(params.getTable_stats().num_rows));
-    msTbl.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
-        StatsSetupConst.TRUE);
+    msTbl.putToParameters(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
     return numTargetedPartitions;
   }
 
@@ -1909,7 +1907,7 @@
         cacheIds = Lists.<Long>newArrayList(id);
         // Update the partition metadata to include the cache directive id.
         msClient.getHiveClient().alter_partition(partition.getDbName(),
-            partition.getTableName(), partition);
+            partition.getTableName(), partition, null);
       }
       updateLastDdlTime(msTbl, msClient);
     } catch (AlreadyExistsException e) {
@@ -2488,7 +2486,7 @@
         }
         // Update the partition metadata to include the cache directive id.
         msClient.getHiveClient().alter_partitions(tableName.getDb(),
-            tableName.getTbl(), hmsPartitions);
+            tableName.getTbl(), hmsPartitions, null);
       }
       updateLastDdlTime(msTbl, msClient);
     } catch (AlreadyExistsException e) {
@@ -2631,7 +2629,7 @@
     try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
       TableName tableName = tbl.getTableName();
       msClient.getHiveClient().alter_partition(
-          tableName.getDb(), tableName.getTbl(), partition.toHmsPartition());
+          tableName.getDb(), tableName.getTbl(), partition.toHmsPartition(), null);
       org.apache.hadoop.hive.metastore.api.Table msTbl =
           tbl.getMetaStoreTable().deepCopy();
       updateLastDdlTime(msTbl, msClient);
@@ -2787,7 +2785,7 @@
         try {
           // Alter partitions in bulk.
           msClient.getHiveClient().alter_partitions(dbName, tableName,
-              hmsPartitions.subList(i, numPartitionsToUpdate));
+              hmsPartitions.subList(i, numPartitionsToUpdate), null);
           // Mark the corresponding HdfsPartition objects as dirty
           for (org.apache.hadoop.hive.metastore.api.Partition msPartition:
                hmsPartitions.subList(i, numPartitionsToUpdate)) {
@@ -3061,7 +3059,7 @@
               partition.getSd().setSerdeInfo(msTbl.getSd().getSerdeInfo().deepCopy());
               partition.getSd().setLocation(msTbl.getSd().getLocation() + "/" +
                   partName.substring(0, partName.length() - 1));
-              MetaStoreUtils.updatePartitionStatsFast(partition, warehouse);
+              MetaStoreUtils.updatePartitionStatsFast(partition, warehouse, null);
             }
 
             // First add_partitions and then alter_partitions the successful ones with
@@ -3092,7 +3090,7 @@
                 }
                 try {
                   msClient.getHiveClient().alter_partitions(tblName.getDb(),
-                      tblName.getTbl(), cachedHmsParts);
+                      tblName.getTbl(), cachedHmsParts, null);
                 } catch (Exception e) {
                   LOG.error("Failed in alter_partitions: ", e);
                   // Try to uncache the partitions when the alteration in the HMS failed.
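
Two Hive 2.x API changes run through this file. StatsSetupConst no longer
has STATS_GENERATED_VIA_STATS_TASK=TRUE; instead a STATS_GENERATED key
records who produced the stats (TASK vs. USER). And the HMS
alter_partition()/alter_partitions() calls gained a trailing
EnvironmentContext parameter, passed as null here. A hedged sketch of
both shapes together (the client and partition objects are placeholders):

  import org.apache.hadoop.hive.common.StatsSetupConst;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class Hive2StatsSketch {
    // Record a task-generated row count on a partition, then push the
    // change to the metastore. The null EnvironmentContext mirrors the
    // call sites in this patch.
    static void setRowCount(IMetaStoreClient client, Partition part,
        long numRows) throws Exception {
      part.putToParameters(StatsSetupConst.ROW_COUNT,
          String.valueOf(numRows));
      part.putToParameters(StatsSetupConst.STATS_GENERATED,
          StatsSetupConst.TASK);
      client.alter_partition(part.getDbName(), part.getTableName(),
          part, null);
    }
  }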
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index d236cb1..37793d4 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -35,10 +35,10 @@
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
 import org.apache.impala.analysis.AnalysisContext;
 import org.apache.impala.analysis.CreateDataSrcStmt;
 import org.apache.impala.analysis.CreateDropRoleStmt;
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index bff5342..e4b30a5 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -731,17 +731,6 @@
       errorCause.append(" is not enabled.\n");
     }
 
-    // dfs.client.file-block-storage-locations.timeout.millis should be >= 10 seconds
-    int dfsClientFileBlockStorageLocationsTimeoutMs = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
-    if (dfsClientFileBlockStorageLocationsTimeoutMs <
-        MIN_DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS) {
-      errorCause.append(prefix);
-      errorCause.append(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS);
-      errorCause.append(" is too low. It should be at least 10 seconds.\n");
-    }
-
     if (errorCause.length() > 0) {
       output.append(errorMessage);
       output.append(errorCause);
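
Note: the deleted check is obsolete because the Hadoop 3 line removes the
experimental getFileBlockStorageLocations() API together with the
dfs.client.file-block-storage-locations.timeout.millis setting and its
DFSConfigKeys constants; this pairs with the BlockStorageLocation and
VolumeId import removals in HdfsTable.java above.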
diff --git a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
index 95ef700..d0feccd 100644
--- a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
@@ -67,7 +67,7 @@
     // Get the value from the Hive configuration, if present.
     HiveConf hiveConf = new HiveConf(HdfsTable.class);
     String strValue = hiveConf.get(
-        HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString());
+        HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString());
     if (strValue != null) {
       try {
         maxPartitionsPerRpc_ = Short.parseShort(strValue);
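
The METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX constant does not exist
in the Hive 2.x HiveConf; METASTORE_BATCH_RETRIEVE_OBJECTS_MAX appears to
cover the same batch-retrieve partition limit. A small sketch of the
lookup pattern used above (ConfVars.toString() yields the property key):

  import org.apache.hadoop.hive.conf.HiveConf;

  public class BatchRetrieveSketch {
    public static void main(String[] args) {
      HiveConf conf = new HiveConf();
      String key =
          HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString();
      // Prints the configured (or default) maximum number of objects
      // fetched per metastore RPC.
      System.out.println(key + " = " + conf.get(key));
    }
  }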
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index 9821694..e2b9195 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -30,9 +30,9 @@
 
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
 import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
 import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
 import org.junit.After;
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 2b37bf3..17a90b1 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -2927,7 +2927,7 @@
     // may have unquoted identifiers corresponding to keywords.
     for (String keyword: SqlScanner.keywordMap.keySet()) {
       // Skip keywords that are not valid field/column names in the Metastore.
-      if (!MetaStoreUtils.validateName(keyword)) continue;
+      if (!MetaStoreUtils.validateName(keyword, null)) continue;
       String structType = "STRUCT<" + keyword + ":INT>";
       TypeDefsParseOk(structType);
     }
diff --git a/fe/src/test/java/org/apache/impala/service/FrontendTest.java b/fe/src/test/java/org/apache/impala/service/FrontendTest.java
index 24056dc..dbd35b3 100644
--- a/fe/src/test/java/org/apache/impala/service/FrontendTest.java
+++ b/fe/src/test/java/org/apache/impala/service/FrontendTest.java
@@ -25,12 +25,12 @@
 import java.util.Set;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.hive.service.cli.thrift.TGetCatalogsReq;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetInfoReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
+import org.apache.hive.service.rpc.thrift.TGetCatalogsReq;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetInfoReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
 import org.junit.Test;
 import org.apache.impala.analysis.AuthorizationTest;
 import org.apache.impala.authorization.AuthorizationConfig;