HIVE-28121: Use direct SQL for transactional altering table parameter (Rui Li, reviewed by Peter Vary)
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 01ef457..9e8ec84 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -154,12 +154,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
String expectedValue = environmentContext != null && environmentContext.getProperties() != null ?
environmentContext.getProperties().get(hive_metastoreConstants.EXPECTED_PARAMETER_VALUE) : null;
- if (expectedKey != null) {
- // If we have to check the expected state of the table we have to prevent nonrepeatable reads.
- msdb.openTransaction(Constants.TX_REPEATABLE_READ);
- } else {
- msdb.openTransaction();
- }
+ msdb.openTransaction();
// get old table
// Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
oldt = msdb.getTable(catName, dbname, name, null);
@@ -168,10 +163,20 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
TableName.getQualified(catName, dbname, name) + " doesn't exist");
}
- if (expectedKey != null && expectedValue != null
- && !expectedValue.equals(oldt.getParameters().get(expectedKey))) {
- throw new MetaException("The table has been modified. The parameter value for key '" + expectedKey + "' is '"
- + oldt.getParameters().get(expectedKey) + "'. The expected was value was '" + expectedValue + "'");
+ if (expectedKey != null && expectedValue != null) {
+ String newValue = newt.getParameters().get(expectedKey);
+ if (newValue == null) {
+ throw new MetaException(String.format("New value for expected key %s is not set", expectedKey));
+ }
+ if (!expectedValue.equals(oldt.getParameters().get(expectedKey))) {
+ throw new MetaException("The table has been modified. The parameter value for key '" + expectedKey + "' is '"
+ oldt.getParameters().get(expectedKey) + "'. The expected value was '" + expectedValue + "'");
+ }
+ long affectedRows = msdb.updateParameterWithExpectedValue(oldt, expectedKey, expectedValue, newValue);
+ if (affectedRows != 1) {
+ // make sure concurrent modification exception messages have the same prefix
+ throw new MetaException("The table has been modified. The parameter value for key '" + expectedKey + "' is different");
+ }
}
if (oldt.getPartitionKeysSize() != 0) {
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index d883456..69eeb7a 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -2563,4 +2563,13 @@ private void getStatsTableListResult(
query.closeAll();
}
}
+
+ long updateTableParam(Table table, String key, String expectedValue, String newValue) {
+ String queryText = String.format("UPDATE \"TABLE_PARAMS\" SET \"PARAM_VALUE\" = ? " +
+ "WHERE \"PARAM_KEY\" = ? AND \"PARAM_VALUE\" = ? AND \"TBL_ID\" IN " +
+ "(SELECT \"TBL_ID\" FROM %s JOIN %s ON %s.\"DB_ID\" = %s.\"DB_ID\" WHERE \"TBL_NAME\" = ? AND \"NAME\" = ?)",
+ TBLS, DBS, TBLS, DBS);
+ Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+ return (long) query.executeWithArray(newValue, key, expectedValue, table.getTableName(), table.getDbName());
+ }
}
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 07d56fd..e6b6578 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -519,6 +519,29 @@ public boolean openTransaction(String isolationLevel) {
return result;
}
+ @Override
+ public long updateParameterWithExpectedValue(Table table, String key, String expectedValue, String newValue)
+ throws MetaException, NoSuchObjectException {
+ return new GetHelper<Long>(table.getCatName(), table.getDbName(), table.getTableName(), true, false) {
+
+ @Override
+ protected String describeResult() {
+ return "Affected rows";
+ }
+
+ @Override
+ protected Long getSqlResult(GetHelper<Long> ctx) throws MetaException {
+ return directSql.updateTableParam(table, key, expectedValue, newValue);
+ }
+
+ @Override
+ protected Long getJdoResult(GetHelper<Long> ctx) throws MetaException, NoSuchObjectException {
+ throw new UnsupportedOperationException(
+ "Cannot update parameter with JDO, make sure direct SQL is enabled");
+ }
+ }.run(false);
+ }
+
/**
* if this is the commit of the first open call then an actual commit is
* called.
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index f0c9895..a0054a5 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -1779,4 +1779,14 @@ Map<String, List<String>> getPartitionColsWithStats(String catName, String dbNam
*/
List<String> isPartOfMaterializedView(String catName, String dbName, String tblName);
+ /**
+ * Updates a given table parameter with expected value.
+ *
+ * @return the number of rows updated
+ */
+ default long updateParameterWithExpectedValue(Table table, String key, String expectedValue, String newValue)
+ throws MetaException, NoSuchObjectException {
+ throw new UnsupportedOperationException("This Store doesn't support updating table parameter with expected value");
+ }
+
}
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 6457da0..4b02d11 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -428,6 +428,10 @@ public enum ConfVars {
"Default transaction isolation level for identity generation."),
DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy",
"datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
+ DATANUCLEUS_QUERY_SQL_ALLOWALL("datanucleus.query.sql.allowAll", "datanucleus.query.sql.allowAll",
+ true, "In strict JDO all SQL queries must begin with \"SELECT ...\", and consequently it "
+ + "is not possible to execute queries that change data. This DataNucleus property when set to true allows "
+ + "insert, update and delete operations from JDO SQL. Default value is true."),
DBACCESS_SSL_PROPS("metastore.dbaccess.ssl.properties", "hive.metastore.dbaccess.ssl.properties", "",
"Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
"enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
@@ -1185,6 +1189,7 @@ public String toString() {
ConfVars.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK,
ConfVars.DATANUCLEUS_TRANSACTION_ISOLATION,
ConfVars.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY,
+ ConfVars.DATANUCLEUS_QUERY_SQL_ALLOWALL,
ConfVars.DETACH_ALL_ON_COMMIT,
ConfVars.IDENTIFIER_FACTORY,
ConfVars.MANAGER_FACTORY_CLASS,
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index 345c00c..dcde612 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -49,6 +49,7 @@
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
import org.apache.thrift.TApplicationException;
import org.apache.thrift.TException;
@@ -56,6 +57,7 @@
import org.apache.thrift.transport.TTransportException;
import org.junit.After;
import org.junit.Assert;
+import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -105,7 +107,7 @@ public TestTablesCreateDropAlterTruncate(String name, AbstractMetaStoreService m
@BeforeClass
public static void startMetaStores() {
- Map<MetastoreConf.ConfVars, String> msConf = new HashMap<MetastoreConf.ConfVars, String>();
+ Map<ConfVars, String> msConf = new HashMap<ConfVars, String>();
// Enable trash, so it can be tested
Map<String, String> extraConf = new HashMap<>();
extraConf.put("fs.trash.checkpoint.interval", "30"); // FS_TRASH_CHECKPOINT_INTERVAL_KEY
@@ -1101,6 +1103,8 @@ public void testAlterTableAlreadyExists() throws Exception {
@Test
public void testAlterTableExpectedPropertyMatch() throws Exception {
+ Assume.assumeTrue(MetastoreConf.getBoolVar(metaStore.getConf(), ConfVars.TRY_DIRECT_SQL));
+ Assume.assumeTrue(MetastoreConf.getBoolVar(metaStore.getConf(), ConfVars.TRY_DIRECT_SQL_DDL));
Table originalTable = testTables[0];
EnvironmentContext context = new EnvironmentContext();
@@ -1114,6 +1118,8 @@ public void testAlterTableExpectedPropertyMatch() throws Exception {
@Test(expected = MetaException.class)
public void testAlterTableExpectedPropertyDifferent() throws Exception {
+ Assume.assumeTrue(MetastoreConf.getBoolVar(metaStore.getConf(), ConfVars.TRY_DIRECT_SQL));
+ Assume.assumeTrue(MetastoreConf.getBoolVar(metaStore.getConf(), ConfVars.TRY_DIRECT_SQL_DDL));
Table originalTable = testTables[0];
EnvironmentContext context = new EnvironmentContext();
@@ -1133,6 +1139,8 @@ public void testAlterTableExpectedPropertyDifferent() throws Exception {
*/
@Test
public void testAlterTableExpectedPropertyConcurrent() throws Exception {
+ Assume.assumeTrue(MetastoreConf.getBoolVar(metaStore.getConf(), ConfVars.TRY_DIRECT_SQL));
+ Assume.assumeTrue(MetastoreConf.getBoolVar(metaStore.getConf(), ConfVars.TRY_DIRECT_SQL_DDL));
Table originalTable = testTables[0];
originalTable.getParameters().put("snapshot", "0");