Add more tests for PHOENIX-6247
Signed-off-by: Gokcen Iskender <gokceng@gmail.com>
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
index 88e14ec..54d481f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CsvBulkLoadToolIT.java
@@ -466,7 +466,7 @@
admin.snapshot(snapshotName, TableName.valueOf(fullTableName));
admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
}
- LogicalTableNameIT.renameAndDropPhysicalTable(conn, "NULL", schemaName, tableName, newTableName);
+ LogicalTableNameIT.renameAndDropPhysicalTable(conn, "NULL", schemaName, tableName, newTableName, false);
String csvName = "/tmp/input_logical_name.csv";
FileSystem fs = FileSystem.get(getUtility().getConfiguration());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java
new file mode 100644
index 0000000..5834984
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.regionserver.ScanInfoUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.PhoenixTestBuilder;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+
+import static java.util.Arrays.asList;
+import static org.apache.phoenix.query.PhoenixTestBuilder.DDLDefaults.MAX_ROWS;
+import static org.apache.phoenix.query.QueryConstants.NAMESPACE_SEPARATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+
+public class LogicalTableNameBaseIT extends BaseTest {
+ protected String dataTableDdl = "";
+ public static final String NEW_TABLE_PREFIX = "NEW_TBL_";
+ private Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+
+ static void initCluster(boolean isNamespaceMapped) throws Exception {
+ Map<String, String> props = Maps.newConcurrentMap();
+ props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
+ props.put(ScanInfoUtil.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60*1000)); // An hour
+ if (isNamespaceMapped) {
+ props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
+ }
+ setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ }
+
+ protected Connection getConnection(Properties props) throws Exception {
+ props.setProperty(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+ // Force real driver to be used as the test one doesn't handle creating
+ // more than one ConnectionQueryService
+ props.setProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, StringUtil.EMPTY_STRING);
+ // Create new ConnectionQueryServices so that we can set DROP_METADATA_ATTRIB
+ String url = QueryUtil.getConnectionUrl(props, config, "PRINCIPAL");
+ return DriverManager.getConnection(url, props);
+ }
+
+ public static void createAndPointToNewPhysicalTable(Connection conn, String fullTableHName, boolean isNamespaceEnabled) throws Exception{
+ String tableName = SchemaUtil.getTableNameFromFullName(fullTableHName);
+ String newTableName = NEW_TABLE_PREFIX + tableName;
+ createAndPointToNewPhysicalTable(conn, fullTableHName, newTableName, isNamespaceEnabled);
+ }
+
+ public static void createAndPointToNewPhysicalTable(Connection conn, String fullTableHName, String newTableName, boolean isNamespaceEnabled) throws Exception{
+ String tableName = SchemaUtil.getTableNameFromFullName(fullTableHName);
+ String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableHName);
+ String fullNewTableHName = schemaName + (isNamespaceEnabled? ":" : ".") + newTableName;
+ String
+ snapshotName =
+ new StringBuilder(tableName).append("-Snapshot").toString();
+
+ try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
+ .getAdmin()) {
+
+ admin.snapshot(snapshotName, TableName.valueOf(fullTableHName));
+ admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableHName));
+ admin.deleteSnapshot(snapshotName);
+ LogicalTableNameIT.renameAndDropPhysicalTable(conn, null, schemaName, tableName,
+ newTableName, isNamespaceEnabled);
+
+ }
+ }
+
+ protected HashMap<String, ArrayList<String>> testBaseTableWithIndex_BaseTableChange(Connection conn, Connection conn2,
+ String schemaName, String tableName, String indexName,
+ boolean isNamespaceEnabled,
+ boolean createChildAfterRename) throws Exception {
+ conn.setAutoCommit(true);
+ String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ createTable(conn, fullTableName);
+ if (!createChildAfterRename) {
+ createIndexOnTable(conn, fullTableName, indexName);
+ }
+ HashMap<String, ArrayList<String>> expected = populateTable(conn, fullTableName, 1, 2);
+
+ // Create another hbase table and add 1 more row
+ String newTableName = NEW_TABLE_PREFIX + tableName;
+ String fullNewTableName = SchemaUtil.getTableName(schemaName, newTableName);
+ try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ String snapshotName = new StringBuilder(fullTableName).append("-Snapshot").toString();
+ admin.snapshot(snapshotName, TableName.valueOf(fullTableName));
+ admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
+ admin.deleteSnapshot(snapshotName);
+ try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableName))) {
+ Put put = new Put(ByteUtil.concat(Bytes.toBytes("PK3")));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
+ QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("V13"));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V2"),
+ PInteger.INSTANCE.toBytes(3));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V3"),
+ PInteger.INSTANCE.toBytes(4));
+ htable.put(put);
+ expected.put("PK3", Lists.newArrayList("PK3", "V13", "3", "4"));
+ }
+ }
+
+ // Query to cache on the second connection
+ String selectTable1 = "SELECT PK1, V1, V2, V3 FROM " + fullTableName + " ORDER BY PK1 DESC";
+ ResultSet rs1 = conn2.createStatement().executeQuery(selectTable1);
+ assertTrue(rs1.next());
+
+ // Rename table to point to the new hbase table
+ renameAndDropPhysicalTable(conn, "NULL", schemaName, tableName, newTableName, isNamespaceEnabled);
+
+ if (createChildAfterRename) {
+ createIndexOnTable(conn, fullTableName, indexName);
+ }
+
+ return expected;
+ }
+
+ protected HashMap<String, ArrayList<String>> test_IndexTableChange(Connection conn, Connection conn2, String schemaName, String tableName,
+ String indexName,
+ byte[] verifiedBytes, boolean isNamespaceEnabled) throws Exception {
+ String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+ conn.setAutoCommit(true);
+ createTable(conn, fullTableName);
+ createIndexOnTable(conn, fullTableName, indexName);
+ HashMap<String, ArrayList<String>> expected = populateTable(conn, fullTableName, 1, 2);
+
+ // Create another hbase table for index and add 1 more row
+ String newTableName = "NEW_IDXTBL_" + indexName;
+ String fullNewTableName = SchemaUtil.getTableName(schemaName, newTableName);
+ String fullIndexTableHbaseName = fullIndexName;
+ if (isNamespaceEnabled) {
+ fullNewTableName = schemaName + NAMESPACE_SEPARATOR + newTableName;
+ fullIndexTableHbaseName = schemaName + NAMESPACE_SEPARATOR + indexName;
+ }
+ try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
+ .getAdmin()) {
+ String snapshotName = new StringBuilder(indexName).append("-Snapshot").toString();
+ admin.snapshot(snapshotName, TableName.valueOf(fullIndexTableHbaseName));
+ admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
+ admin.deleteSnapshot(snapshotName);
+ try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableName))) {
+ Put
+ put =
+ new Put(ByteUtil.concat(Bytes.toBytes("V13"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("PK3")));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
+ verifiedBytes);
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("0:V2"),
+ PInteger.INSTANCE.toBytes(3));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("0:V3"),
+ PInteger.INSTANCE.toBytes(4));
+ htable.put(put);
+ expected.put("PK3", Lists.newArrayList("PK3", "V13", "3", "4"));
+ }
+ }
+
+ // Query to cache on the second connection
+ String selectTable1 = "SELECT * FROM " + fullIndexName;
+ ResultSet rs1 = conn2.createStatement().executeQuery(selectTable1);
+ assertTrue(rs1.next());
+
+ // Rename table to point to the new hbase table
+ renameAndDropPhysicalTable(conn, "NULL", schemaName, indexName, newTableName, isNamespaceEnabled);
+
+ return expected;
+ }
+
+ protected HashMap<String, ArrayList<String>> testWithViewsAndIndex_BaseTableChange(Connection conn, Connection conn2, String tenantName,
+ String schemaName, String tableName,
+ String viewName1, String v1_indexName1, String v1_indexName2, String viewName2, String v2_indexName1, boolean isNamespaceEnabled,
+ boolean createChildAfterRename) throws Exception {
+ conn.setAutoCommit(true);
+ conn2.setAutoCommit(true);
+ String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ String fullViewName1 = SchemaUtil.getTableName(schemaName, viewName1);
+ String fullViewName2 = SchemaUtil.getTableName(schemaName, viewName2);
+ createTable(conn, fullTableName);
+ HashMap<String, ArrayList<String>> expected = new HashMap<>();
+ if (!createChildAfterRename) {
+ createViewAndIndex(conn2, schemaName, tableName, viewName1, v1_indexName1);
+ createViewAndIndex(conn2, schemaName, tableName, viewName1, v1_indexName2);
+ createViewAndIndex(conn2, schemaName, tableName, viewName2, v2_indexName1);
+ expected.putAll(populateView(conn, fullViewName1, 1,2));
+ expected.putAll(populateView(conn, fullViewName2, 10,2));
+ }
+
+ // Create another hbase table and add 1 more row
+ String newTableName = NEW_TABLE_PREFIX + generateUniqueName();
+ String fullNewTableName = SchemaUtil.getTableName(schemaName, newTableName);
+ String fullTableHbaseName = fullTableName;
+ if (isNamespaceEnabled) {
+ fullNewTableName = schemaName + NAMESPACE_SEPARATOR + newTableName;
+ fullTableHbaseName = schemaName + NAMESPACE_SEPARATOR + tableName;
+ }
+ try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
+ .getAdmin()) {
+ String snapshotName = new StringBuilder(fullTableName).append("-Snapshot").toString();
+ admin.snapshot(snapshotName, TableName.valueOf(fullTableHbaseName));
+ admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
+ admin.deleteSnapshot(snapshotName);
+ try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableName))) {
+ Put put = new Put(ByteUtil.concat(Bytes.toBytes("PK3")));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
+ QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"),
+ Bytes.toBytes("V13"));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V2"),
+ PInteger.INSTANCE.toBytes(3));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V3"),
+ PInteger.INSTANCE.toBytes(4));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("VIEW_COL1"),
+ Bytes.toBytes("VIEW_COL1_3"));
+ put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("VIEW_COL2"),
+ Bytes.toBytes("VIEW_COL2_3"));
+ htable.put(put);
+ expected.put("PK3", Lists.newArrayList("PK3", "V13", "3", "4", "VIEW_COL1_3", "VIEW_COL2_3"));
+ }
+ }
+
+ // Query to cache on the second connection
+ if (tenantName != null) {
+ String selectTable1 = "SELECT PK1, V1, V2, V3 FROM " + fullTableName + " ORDER BY PK1 DESC";
+ ResultSet rs1 = conn2.createStatement().executeQuery(selectTable1);
+ if (!createChildAfterRename) {
+ assertTrue(rs1.next());
+ }
+ }
+
+ // Rename table to point to hbase table
+ renameAndDropPhysicalTable(conn, "NULL", schemaName, tableName, newTableName, isNamespaceEnabled);
+
+ conn.unwrap(PhoenixConnection.class).getQueryServices().clearCache();
+ if (createChildAfterRename) {
+ createViewAndIndex(conn2, schemaName, tableName, viewName1, v1_indexName1);
+ createViewAndIndex(conn2, schemaName, tableName, viewName1, v1_indexName2);
+ createViewAndIndex(conn2, schemaName, tableName, viewName2, v2_indexName1);
+ expected.putAll(populateView(conn2, fullViewName1, 1,2));
+ expected.putAll(populateView(conn2, fullViewName2, 10,2));
+ }
+
+ return expected;
+ }
+
+ protected PhoenixTestBuilder.SchemaBuilder testGlobalViewAndTenantView(boolean createChildAfterRename, boolean isNamespaceEnabled) throws Exception {
+ int numOfRows = 5;
+ PhoenixTestBuilder.SchemaBuilder.TableOptions tableOptions = PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults();
+ tableOptions.getTableColumns().clear();
+ tableOptions.getTableColumnTypes().clear();
+ tableOptions.setTableProps(" MULTI_TENANT=true, COLUMN_ENCODED_BYTES=0 "+this.dataTableDdl);
+
+ PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions globalViewOptions = PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions.withDefaults();
+
+ PhoenixTestBuilder.SchemaBuilder.GlobalViewIndexOptions globalViewIndexOptions =
+ PhoenixTestBuilder.SchemaBuilder.GlobalViewIndexOptions.withDefaults();
+ globalViewIndexOptions.setLocal(false);
+
+ PhoenixTestBuilder.SchemaBuilder.TenantViewOptions tenantViewOptions = new PhoenixTestBuilder.SchemaBuilder.TenantViewOptions();
+ tenantViewOptions.setTenantViewColumns(asList("ZID", "COL7", "COL8", "COL9"));
+ tenantViewOptions.setTenantViewColumnTypes(asList("CHAR(15)", "VARCHAR", "VARCHAR", "VARCHAR"));
+
+ PhoenixTestBuilder.SchemaBuilder.OtherOptions testCaseWhenAllCFMatchAndAllDefault = new PhoenixTestBuilder.SchemaBuilder.OtherOptions();
+ testCaseWhenAllCFMatchAndAllDefault.setTestName("testCaseWhenAllCFMatchAndAllDefault");
+ testCaseWhenAllCFMatchAndAllDefault
+ .setTableCFs(Lists.newArrayList((String) null, null, null));
+ testCaseWhenAllCFMatchAndAllDefault
+ .setGlobalViewCFs(Lists.newArrayList((String) null, null, null));
+ testCaseWhenAllCFMatchAndAllDefault
+ .setTenantViewCFs(Lists.newArrayList((String) null, null, null, null));
+
+ // Define the test schema.
+ PhoenixTestBuilder.SchemaBuilder schemaBuilder = null;
+ if (!createChildAfterRename) {
+ schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
+ schemaBuilder.withTableOptions(tableOptions).withGlobalViewOptions(globalViewOptions)
+ .withGlobalViewIndexOptions(globalViewIndexOptions)
+ .withTenantViewOptions(tenantViewOptions)
+ .withOtherOptions(testCaseWhenAllCFMatchAndAllDefault).build();
+ } else {
+ schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
+ schemaBuilder.withTableOptions(tableOptions).build();
+ }
+
+ PTable table = schemaBuilder.getBaseTable();
+ String schemaName = table.getSchemaName().getString();
+ String tableName = table.getTableName().getString();
+ String newBaseTableName = NEW_TABLE_PREFIX + tableName;
+ String fullNewBaseTableName = SchemaUtil.getTableName(schemaName, newBaseTableName);
+ String fullTableName = table.getName().getString();
+
+ String fullTableHName = schemaName + ":" + tableName;
+ String fullNewTableHName = schemaName + ":" + newBaseTableName;
+ try (Connection conn = getConnection(props)) {
+ createAndPointToNewPhysicalTable(conn, fullTableHName, newBaseTableName, isNamespaceEnabled);
+ }
+
+ if (createChildAfterRename) {
+ schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
+ schemaBuilder.withDataOptions(schemaBuilder.getDataOptions())
+ .withTableOptions(tableOptions)
+ .withGlobalViewOptions(globalViewOptions)
+ .withGlobalViewIndexOptions(globalViewIndexOptions)
+ .withTenantViewOptions(tenantViewOptions)
+ .withOtherOptions(testCaseWhenAllCFMatchAndAllDefault).build();
+ }
+
+ // Define the test data.
+ PhoenixTestBuilder.DataSupplier dataSupplier = new PhoenixTestBuilder.DataSupplier() {
+
+ @Override public List<Object> getValues(int rowIndex) {
+ Random rnd = new Random();
+ String id = String.format(ViewTTLIT.ID_FMT, rowIndex);
+ String zid = String.format(ViewTTLIT.ZID_FMT, rowIndex);
+ String col4 = String.format(ViewTTLIT.COL4_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+ String col5 = String.format(ViewTTLIT.COL5_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+ String col6 = String.format(ViewTTLIT.COL6_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+ String col7 = String.format(ViewTTLIT.COL7_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+ String col8 = String.format(ViewTTLIT.COL8_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+ String col9 = String.format(ViewTTLIT.COL9_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
+
+ return Lists.newArrayList(
+ new Object[] { id, zid, col4, col5, col6, col7, col8, col9 });
+ }
+ };
+
+ // Create a test data reader/writer for the above schema.
+ PhoenixTestBuilder.DataWriter dataWriter = new PhoenixTestBuilder.BasicDataWriter();
+ List<String> columns =
+ Lists.newArrayList("ID", "ZID", "COL4", "COL5", "COL6", "COL7", "COL8", "COL9");
+ List<String> rowKeyColumns = Lists.newArrayList("ID", "ZID");
+
+ String tenantConnectUrl =
+ getUrl() + ';' + TENANT_ID_ATTRIB + '=' + schemaBuilder.getDataOptions().getTenantId();
+
+ try (Connection tenantConnection = DriverManager.getConnection(tenantConnectUrl)) {
+ tenantConnection.setAutoCommit(true);
+ dataWriter.setConnection(tenantConnection);
+ dataWriter.setDataSupplier(dataSupplier);
+ dataWriter.setUpsertColumns(columns);
+ dataWriter.setRowKeyColumns(rowKeyColumns);
+ dataWriter.setTargetEntity(schemaBuilder.getEntityTenantViewName());
+ dataWriter.upsertRows(1, numOfRows);
+ com.google.common.collect.Table<String, String, Object> upsertedData = dataWriter.getDataTable();
+
+ PhoenixTestBuilder.DataReader dataReader = new PhoenixTestBuilder.BasicDataReader();
+ dataReader.setValidationColumns(columns);
+ dataReader.setRowKeyColumns(rowKeyColumns);
+ dataReader.setDML(String.format("SELECT %s from %s", Joiner.on(",").join(columns),
+ schemaBuilder.getEntityTenantViewName()));
+ dataReader.setTargetEntity(schemaBuilder.getEntityTenantViewName());
+ dataReader.setConnection(tenantConnection);
+ dataReader.readRows();
+ com.google.common.collect.Table<String, String, Object> fetchedData
+ = dataReader.getDataTable();
+ assertNotNull("Fetched data should not be null", fetchedData);
+ ViewTTLIT.verifyRowsBeforeTTLExpiration(upsertedData, fetchedData);
+
+ }
+ return schemaBuilder;
+ }
+
+ protected void createTable(Connection conn, String tableName) throws Exception {
+ String createTableSql = "CREATE TABLE " + tableName + " (PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 INTEGER, V3 INTEGER "
+ + "CONSTRAINT NAME_PK PRIMARY KEY(PK1)) COLUMN_ENCODED_BYTES=0 " + dataTableDdl;
+ conn.createStatement().execute(createTableSql);
+ }
+
+ protected void createIndexOnTable(Connection conn, String tableName, String indexName, boolean isLocal)
+ throws SQLException {
+ String createIndexSql = "CREATE " + (isLocal? " LOCAL ":"") + " INDEX " + indexName + " ON " + tableName + " (V1) INCLUDE (V2, V3) ";
+ conn.createStatement().execute(createIndexSql);
+ }
+
+ protected void createIndexOnTable(Connection conn, String tableName, String indexName)
+ throws SQLException {
+ createIndexOnTable(conn, tableName, indexName, false);
+ }
+
+ protected void dropIndex(Connection conn, String tableName, String indexName)
+ throws SQLException {
+ String sql = "DROP INDEX " + indexName + " ON " + tableName ;
+ conn.createStatement().execute(sql);
+ }
+
+ protected HashMap<String, ArrayList<String>> populateTable(Connection conn, String tableName, int startnum, int numOfRows)
+ throws SQLException {
+ String upsert = "UPSERT INTO " + tableName + " (PK1, V1, V2, V3) VALUES (?,?,?,?)";
+ PreparedStatement upsertStmt = conn.prepareStatement(upsert);
+ HashMap<String, ArrayList<String>> result = new HashMap<>();
+ for (int i=startnum; i < startnum + numOfRows; i++) {
+ ArrayList<String> row = new ArrayList<>();
+ upsertStmt.setString(1, "PK" + i);
+ row.add("PK" + i);
+ upsertStmt.setString(2, "V1" + i);
+ row.add("V1" + i);
+ upsertStmt.setInt(3, i);
+ row.add(String.valueOf(i));
+ upsertStmt.setInt(4, i + 1);
+ row.add(String.valueOf(i + 1));
+ upsertStmt.executeUpdate();
+ result.put("PK" + i, row);
+ }
+ return result;
+ }
+
+ protected HashMap<String, ArrayList<String>> populateView(Connection conn, String viewName, int startNum, int numOfRows) throws SQLException {
+ String upsert = "UPSERT INTO " + viewName + " (PK1, V1, V2, V3, VIEW_COL1, VIEW_COL2) VALUES (?,?,?,?,?,?)";
+ PreparedStatement upsertStmt = conn.prepareStatement(upsert);
+ HashMap<String, ArrayList<String>> result = new HashMap<>();
+ for (int i=startNum; i < startNum + numOfRows; i++) {
+ ArrayList<String> row = new ArrayList<>();
+ upsertStmt.setString(1, "PK"+i);
+ row.add("PK"+i);
+ upsertStmt.setString(2, "V1"+i);
+ row.add("V1"+i);
+ upsertStmt.setInt(3, i);
+ row.add(String.valueOf(i));
+ upsertStmt.setInt(4, i+1);
+ row.add(String.valueOf(i+1));
+ upsertStmt.setString(5, "VIEW_COL1_"+i);
+ row.add("VIEW_COL1_"+i);
+ upsertStmt.setString(6, "VIEW_COL2_"+i);
+ row.add("VIEW_COL2_"+i);
+ upsertStmt.executeUpdate();
+ result.put("PK"+i, row);
+ }
+ return result;
+ }
+
+ protected void createViewAndIndex(Connection conn, String schemaName, String tableName, String viewName, String viewIndexName)
+ throws SQLException {
+ String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+ String fullViewName = SchemaUtil.getTableName(schemaName, viewName);
+ String
+ view1DDL =
+ "CREATE VIEW IF NOT EXISTS " + fullViewName + " ( VIEW_COL1 VARCHAR, VIEW_COL2 VARCHAR) AS SELECT * FROM "
+ + fullTableName;
+ conn.createStatement().execute(view1DDL);
+ String indexDDL = "CREATE INDEX IF NOT EXISTS " + viewIndexName + " ON " + fullViewName + " (V1) include (V2, V3, VIEW_COL2) ";
+ conn.createStatement().execute(indexDDL);
+ conn.commit();
+ }
+
+ protected void validateTable(Connection connection, String tableName) throws SQLException {
+ String selectTable = "SELECT PK1, V1, V2, V3 FROM " + tableName + " ORDER BY PK1 DESC";
+ ResultSet rs = connection.createStatement().executeQuery(selectTable);
+ assertTrue(rs.next());
+ assertEquals("PK3", rs.getString(1));
+ assertEquals("V13", rs.getString(2));
+ assertEquals(3, rs.getInt(3));
+ assertEquals(4, rs.getInt(4));
+ assertTrue(rs.next());
+ assertEquals("PK2", rs.getString(1));
+ assertEquals("V12", rs.getString(2));
+ assertEquals(2, rs.getInt(3));
+ assertEquals(3, rs.getInt(4));
+ assertTrue(rs.next());
+ assertEquals("PK1", rs.getString(1));
+ assertEquals("V11", rs.getString(2));
+ assertEquals(1, rs.getInt(3));
+ assertEquals(2, rs.getInt(4));
+ }
+
+ protected void validateIndex(Connection connection, String tableName, boolean isViewIndex, HashMap<String, ArrayList<String>> expected) throws SQLException {
+ String selectTable = "SELECT * FROM " + tableName;
+ ResultSet rs = connection.createStatement().executeQuery(selectTable);
+ int cnt = 0;
+ while (rs.next()) {
+ String pk = rs.getString(2);
+ assertTrue(expected.containsKey(pk));
+ ArrayList<String> row = expected.get(pk);
+ assertEquals(row.get(1), rs.getString(1));
+ assertEquals(row.get(2), rs.getString(3));
+ assertEquals(row.get(3), rs.getString(4));
+ if (isViewIndex) {
+ assertEquals(row.get(5), rs.getString(5));
+ }
+ cnt++;
+ }
+ assertEquals(expected.size(), cnt);
+ }
+
+ public static void renameAndDropPhysicalTable(Connection conn, String tenantId, String schema, String tableName, String physicalName, boolean isNamespaceEnabled) throws Exception {
+ String
+ changeName =
+ String.format(
+ "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, PHYSICAL_TABLE_NAME) VALUES (%s, '%s', '%s', NULL, NULL, '%s')",
+ tenantId, schema, tableName, physicalName);
+ conn.createStatement().execute(changeName);
+ conn.commit();
+
+ String fullTableName = SchemaUtil.getTableName(schema, tableName);
+ if (isNamespaceEnabled) {
+ fullTableName = schema + NAMESPACE_SEPARATOR + tableName;
+ }
+ Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
+ TableName hTableName = TableName.valueOf(fullTableName);
+ admin.disableTable(hTableName);
+ admin.deleteTable(hTableName);
+ conn.unwrap(PhoenixConnection.class).getQueryServices()
+ .clearCache();
+ }
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameExtendedIT.java
new file mode 100644
index 0000000..cb58962
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameExtendedIT.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.types.PInteger;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Properties;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Extended coverage for logical/physical table renames (PHOENIX-6247) with namespace
+ * mapping enabled: views, tenant views, local indexes, view-index sequences, ALTER TABLE
+ * cascade, and the case where both data and index tables get new physical names.
+ * Needs its own mini cluster because namespace mapping is a cluster-level setting.
+ */
+@Category(NeedsOwnMiniClusterTest.class)
+public class LogicalTableNameExtendedIT extends LogicalTableNameBaseIT {
+    // Per-test connection properties with namespace mapping switched on.
+    private Properties propsNamespace = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+
+    @BeforeClass
+    public static synchronized void doSetup() throws Exception {
+        initCluster(true);
+    }
+
+    public LogicalTableNameExtendedIT() {
+        propsNamespace.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
+    }
+
+    /** Base-table physical rename with two views and their indexes under namespace mapping. */
+    @Test
+    public void testUpdatePhysicalTableName_namespaceMapped() throws Exception {
+        String schemaName = "S_" + generateUniqueName();
+        String tableName = "TBL_" + generateUniqueName();
+        String view1Name = "VW1_" + generateUniqueName();
+        String view1IndexName1 = "VW1IDX1_" + generateUniqueName();
+        String view1IndexName2 = "VW1IDX2_" + generateUniqueName();
+        String view2Name = "VW2_" + generateUniqueName();
+        String view2IndexName1 = "VW2IDX1_" + generateUniqueName();
+
+        try (Connection conn = getConnection(propsNamespace)) {
+            try (Connection conn2 = getConnection(propsNamespace)) {
+                conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
+                testWithViewsAndIndex_BaseTableChange(conn, conn2, null, schemaName, tableName, view1Name,
+                        view1IndexName1, view1IndexName2, view2Name, view2IndexName1, true, false);
+
+                // New rows written after the rename must be visible through the view index.
+                populateView(conn, (schemaName+"."+view2Name), 10, 1);
+                ResultSet rs = conn2.createStatement().executeQuery("SELECT * FROM " + (schemaName + "." + view2IndexName1) + " WHERE \":PK1\"='PK10'");
+                assertEquals(true, rs.next());
+
+            }
+        }
+    }
+
+    /**
+     * Renames the physical index table first, then the physical data table, and asserts
+     * that neither original physical table still exists in HBase.
+     */
+    private void test_bothTableAndIndexHaveDifferentNames(Connection conn, Connection conn2, String schemaName, String tableName, String indexName) throws Exception {
+        String fullTableHName = schemaName + ":" + tableName;
+        String fullIndexHName = schemaName + ":" + indexName;
+
+        conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
+        // Create tables and change physical index table
+        test_IndexTableChange(conn, conn2, schemaName, tableName, indexName,
+                IndexRegionObserver.UNVERIFIED_BYTES, true);
+        // Now change physical data table
+        createAndPointToNewPhysicalTable(conn, fullTableHName, true);
+        try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
+                .getAdmin()) {
+            assertEquals(false, admin.tableExists(TableName.valueOf(fullTableHName)));
+            assertEquals(false, admin.tableExists(TableName.valueOf(fullIndexHName)));
+        }
+    }
+
+    /** Upsert/delete/IndexTool round-trips after both data and index physical renames. */
+    @Test
+    public void testUpdatePhysicalTableName_bothTableAndIndexHaveDifferentNames() throws Exception {
+        String schemaName = "S_" + generateUniqueName();
+        String tableName = "TBL_" + generateUniqueName();
+        String indexName = "IDX_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        // NOTE: fullTableHName/fullIndexHName locals from the original were unused; removed.
+        String fullNewTableHName = schemaName + ":NEW_TBL_" + tableName;
+        try (Connection conn = getConnection(propsNamespace)) {
+            try (Connection conn2 = getConnection(propsNamespace)) {
+                test_bothTableAndIndexHaveDifferentNames(conn, conn2, schemaName, tableName, indexName);
+                try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
+                        .getAdmin()) {
+                    conn2.setAutoCommit(true);
+                    // Add row and check
+                    populateTable(conn2, fullTableName, 10, 1);
+                    ResultSet
+                            rs =
+                            conn2.createStatement().executeQuery("SELECT * FROM " + fullIndexName + " WHERE \":PK1\"='PK10'");
+                    assertEquals(true, rs.next());
+                    rs = conn.createStatement().executeQuery("SELECT * FROM " + fullTableName + " WHERE PK1='PK10'");
+                    assertEquals(true, rs.next());
+
+                    // Drop row and check
+                    conn.createStatement().execute("DELETE from " + fullTableName + " WHERE PK1='PK10'");
+                    rs = conn2.createStatement().executeQuery("SELECT * FROM " + fullIndexName + " WHERE \":PK1\"='PK10'");
+                    assertEquals(false, rs.next());
+                    rs = conn.createStatement().executeQuery("SELECT * FROM " + fullTableName + " WHERE PK1='PK10'");
+                    assertEquals(false, rs.next());
+
+                    // Add a row and run IndexTool to check that the row is there on the other side
+                    rs = conn.createStatement().executeQuery("SELECT * FROM " + fullIndexName + " WHERE \":PK1\"='PK30'");
+                    assertEquals(false, rs.next());
+                    // Write straight to the renamed physical data table via HBase,
+                    // bypassing Phoenix, so only IndexTool can propagate it to the index.
+                    try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableHName))) {
+                        Put put = new Put(ByteUtil.concat(Bytes.toBytes("PK30")));
+                        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
+                                QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+                        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"),
+                                Bytes.toBytes("V30"));
+                        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V2"),
+                                PInteger.INSTANCE.toBytes(32));
+                        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V3"),
+                                PInteger.INSTANCE.toBytes(33));
+                        htable.put(put);
+                    }
+
+                    IndexToolIT.runIndexTool(true, false, schemaName, tableName, indexName);
+                    rs = conn.createStatement().executeQuery("SELECT * FROM " + fullIndexName + " WHERE \":PK1\"='PK30'");
+                    assertEquals(true, rs.next());
+
+                    // Drop tables
+                    conn2.createStatement().execute("DROP TABLE " + fullTableName);
+                    // check that the physical data table is dropped
+                    assertEquals(false, admin.tableExists(TableName.valueOf(fullNewTableHName)));
+
+                    // check that index is dropped
+                    assertEquals(false, admin.tableExists(TableName.valueOf((schemaName + ":NEW_IDXTBL_" + indexName))));
+                }
+            }
+        }
+    }
+
+    /** ALTER TABLE ADD ... CASCADE INDEX ALL and DROP COLUMN after physical renames. */
+    @Test
+    public void testUpdatePhysicalTableName_alterTable() throws Exception {
+        String schemaName = "S_" + generateUniqueName();
+        String tableName = "TBL_" + generateUniqueName();
+        String indexName = "IDX_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        // NOTE: fullTableHName/fullIndexHName/fullNewTableHName locals from the
+        // original were unused; removed.
+        try (Connection conn = getConnection(propsNamespace)) {
+            try (Connection conn2 = getConnection(propsNamespace)) {
+                test_bothTableAndIndexHaveDifferentNames(conn, conn2, schemaName, tableName, indexName);
+                conn2.setAutoCommit(true);
+
+                conn2.createStatement().execute("ALTER TABLE " + fullTableName + " ADD new_column_1 VARCHAR(64) CASCADE INDEX ALL");
+                conn2.createStatement().execute("UPSERT INTO " + fullTableName + " (PK1, V1, new_column_1) VALUES ('a', 'v1', 'new_col_val')");
+                ResultSet
+                        rs =
+                        conn2.createStatement().executeQuery("SELECT \"0:NEW_COLUMN_1\" FROM " + fullIndexName);
+                assertEquals(true, rs.next());
+                rs = conn.createStatement().executeQuery("SELECT NEW_COLUMN_1 FROM " + fullTableName + " WHERE NEW_COLUMN_1 IS NOT NULL");
+                assertEquals(true, rs.next());
+                assertEquals(false, rs.next());
+
+                // Drop column, check is that there are no exceptions
+                conn.createStatement().execute("ALTER TABLE " + fullTableName + " DROP COLUMN NEW_COLUMN_1");
+            }
+        }
+    }
+
+    /** Global and tenant views after a physical rename, namespace-mapped. */
+    @Test
+    public void testUpdatePhysicalTableName_tenantViews() throws Exception {
+
+        try (Connection conn = getConnection(propsNamespace)) {
+            conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS TEST_ENTITY");
+        }
+        testGlobalViewAndTenantView(false, true);
+        testGlobalViewAndTenantView(true, true);
+    }
+
+    /** Local index survives the physical rename and can be dropped/recreated afterwards. */
+    @Test
+    public void testUpdatePhysicalTableName_localIndex() throws Exception {
+        String schemaName = "S_" + generateUniqueName();
+        String tableName = "TBL_" + generateUniqueName();
+        String indexName = "LCL_IDX_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+        String fullHTableName = schemaName + ":" + tableName;
+
+        try (Connection conn = getConnection(propsNamespace)) {
+            conn.setAutoCommit(true);
+            conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
+            createTable(conn, fullTableName);
+
+            createIndexOnTable(conn, fullTableName, indexName, true);
+            HashMap<String, ArrayList<String>> expected = populateTable(conn, fullTableName, 1, 2);
+            createAndPointToNewPhysicalTable(conn, fullHTableName, true);
+
+            String select = "SELECT * FROM " + fullIndexName;
+            ResultSet rs = conn.createStatement().executeQuery(select);
+            assertEquals(true, rs.next());
+            validateIndex(conn, fullIndexName, false, expected);
+
+            // Drop and recreate
+            conn.createStatement().execute("DROP INDEX " + indexName + " ON " + fullTableName);
+            createIndexOnTable(conn, fullTableName, indexName, true);
+            rs = conn.createStatement().executeQuery(select);
+            assertEquals(true, rs.next());
+            validateIndex(conn, fullIndexName, false, expected);
+        }
+    }
+
+    /**
+     * View-index sequence keeps working after a physical rename: a second view index
+     * created post-rename gets a distinct index id and serves queries correctly.
+     */
+    @Test
+    public void testUpdatePhysicalTableName_viewIndexSequence() throws Exception {
+        String schemaName = "S_" + generateUniqueName();
+        String tableName = "TBL_" + generateUniqueName();
+        String viewName = "VW1_" + generateUniqueName();
+        String viewIndexName1 = "VWIDX1_" + generateUniqueName();
+        String viewIndexName2 = "VWIDX2_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        String fullViewName = SchemaUtil.getTableName(schemaName, viewName);
+        String fullViewIndex1Name = SchemaUtil.getTableName(schemaName, viewIndexName1);
+        String fullViewIndex2Name = SchemaUtil.getTableName(schemaName, viewIndexName2);
+        String fullTableHName = schemaName + ":" + tableName;
+        try (Connection conn = getConnection(propsNamespace)) {
+            conn.setAutoCommit(true);
+            conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
+            createTable(conn, fullTableName);
+            createViewAndIndex(conn, schemaName, tableName, viewName, viewIndexName1);
+            HashMap<String, ArrayList<String>> expected = populateView(conn, fullViewName, 1, 1);
+            createAndPointToNewPhysicalTable(conn, fullTableHName, true);
+            validateIndex(conn, fullViewIndex1Name, true, expected);
+            String indexDDL = "CREATE INDEX IF NOT EXISTS " + viewIndexName2 + " ON " + fullViewName + " (VIEW_COL1) include (VIEW_COL2) ";
+            conn.createStatement().execute(indexDDL);
+            ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " + fullViewIndex2Name);
+            assertEquals(true, rs.next());
+            assertEquals("VIEW_COL1_1", rs.getString(1));
+            assertEquals("PK1", rs.getString(2));
+            assertEquals("VIEW_COL2_1", rs.getString(3));
+            assertEquals(false, rs.next());
+            expected.putAll(populateView(conn, fullViewName, 10, 1));
+
+            validateIndex(conn, fullViewIndex1Name, true, expected);
+            rs = conn.createStatement().executeQuery("SELECT * FROM " + fullViewIndex2Name + " WHERE \"0:VIEW_COL1\"='VIEW_COL1_10'");
+            assertEquals(true, rs.next());
+            assertEquals("VIEW_COL1_10", rs.getString(1));
+            assertEquals("PK10", rs.getString(2));
+            assertEquals("VIEW_COL2_10", rs.getString(3));
+            assertEquals(false, rs.next());
+        }
+    }
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameIT.java
index 15388a8..c4f3c2e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameIT.java
@@ -17,34 +17,18 @@
*/
package org.apache.phoenix.end2end;
-import org.apache.curator.shaded.com.google.common.base.Joiner;
import org.apache.curator.shaded.com.google.common.collect.Lists;
-import org.apache.curator.shaded.com.google.common.collect.Maps;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.regionserver.ScanInfoUtil;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
-import org.apache.phoenix.end2end.index.SingleCellIndexIT;
+import org.apache.phoenix.end2end.join.HashJoinGlobalIndexIT;
import org.apache.phoenix.hbase.index.IndexRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.mapreduce.index.IndexScrutinyTool;
-import org.apache.phoenix.query.BaseTest;
-import org.apache.phoenix.query.PhoenixTestBuilder;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.StringUtil;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -54,7 +38,6 @@
import org.slf4j.LoggerFactory;
import java.sql.Connection;
-import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
@@ -62,42 +45,32 @@
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.Properties;
-import java.util.Random;
-import static java.util.Arrays.asList;
import static org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters.INVALID_ROW_COUNT;
import static org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters.VALID_ROW_COUNT;
-import static org.apache.phoenix.query.PhoenixTestBuilder.DDLDefaults.MAX_ROWS;
-import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.apache.phoenix.util.MetaDataUtil.VIEW_INDEX_TABLE_PREFIX;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@RunWith(Parameterized.class)
@Category(NeedsOwnMiniClusterTest.class)
-public class LogicalTableNameIT extends BaseTest {
+public class LogicalTableNameIT extends LogicalTableNameBaseIT {
private static final Logger LOGGER = LoggerFactory.getLogger(LogicalTableNameIT.class);
- private final boolean createChildAfterTransform;
- private final boolean immutable;
- private String dataTableDdl;
- public static final String NEW_TABLE_PREFIX = "NEW_TBL_";
+ protected boolean createChildAfterRename;
+ private boolean immutable;
private Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@BeforeClass
public static synchronized void doSetup() throws Exception {
- Map<String, String> props = Maps.newConcurrentMap();
- props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
- props.put(ScanInfoUtil.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60*1000)); // An hour
- setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+ initCluster(false);
}
- public LogicalTableNameIT(boolean createChildAfterTransform, boolean immutable) {
- this.createChildAfterTransform = createChildAfterTransform;
+ public LogicalTableNameIT(boolean createChildAfterRename, boolean immutable) {
+ this.createChildAfterRename = createChildAfterRename;
this.immutable = immutable;
StringBuilder optionBuilder = new StringBuilder();
if (immutable) {
@@ -107,7 +80,7 @@
}
@Parameterized.Parameters(
- name = "createChildAfterTransform={0}, immutable={1}")
+ name = "createChildAfterRename={0}, immutable={1}")
public static synchronized Collection<Object[]> data() {
List<Object[]> list = Lists.newArrayListWithExpectedSize(2);
boolean[] Booleans = new boolean[] { false, true };
@@ -120,63 +93,6 @@
return list;
}
- private Connection getConnection(Properties props) throws Exception {
- props.setProperty(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
- // Force real driver to be used as the test one doesn't handle creating
- // more than one ConnectionQueryService
- props.setProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, StringUtil.EMPTY_STRING);
- // Create new ConnectionQueryServices so that we can set DROP_METADATA_ATTRIB
- String url = QueryUtil.getConnectionUrl(props, config, "PRINCIPAL");
- return DriverManager.getConnection(url, props);
- }
-
- private HashMap<String, ArrayList<String>> testBaseTableWithIndex_BaseTableChange(Connection conn, Connection conn2, String schemaName,
- String tableName, String indexName) throws Exception {
- conn.setAutoCommit(true);
- String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
- createTable(conn, fullTableName);
- if (!createChildAfterTransform) {
- createIndexOnTable(conn, fullTableName, indexName);
- }
- HashMap<String, ArrayList<String>> expected = populateTable(conn, fullTableName, 1, 2);
-
- // Create another hbase table and add 1 more row
- String newTableName = NEW_TABLE_PREFIX + tableName;
- String fullNewTableName = SchemaUtil.getTableName(schemaName, newTableName);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- String snapshotName = new StringBuilder(fullTableName).append("-Snapshot").toString();
- admin.snapshot(snapshotName, TableName.valueOf(fullTableName));
- admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
-
- try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableName))) {
- Put put = new Put(ByteUtil.concat(Bytes.toBytes("PK3")));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
- QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("V13"));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V2"),
- PInteger.INSTANCE.toBytes(3));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V3"),
- PInteger.INSTANCE.toBytes(4));
- htable.put(put);
- expected.put("PK3", Lists.newArrayList("PK3", "V13", "3", "4"));
- }
- }
-
- // Query to cache on the second connection
- String selectTable1 = "SELECT PK1, V1, V2, V3 FROM " + fullTableName + " ORDER BY PK1 DESC";
- ResultSet rs1 = conn2.createStatement().executeQuery(selectTable1);
- assertTrue(rs1.next());
-
- // Rename table to point to the new hbase table
- renameAndDropPhysicalTable(conn, "NULL", schemaName, tableName, newTableName);
-
- if (createChildAfterTransform) {
- createIndexOnTable(conn, fullTableName, indexName);
- }
-
- return expected;
- }
-
@Test
public void testUpdatePhysicalTableNameWithIndex() throws Exception {
String schemaName = "S_" + generateUniqueName();
@@ -187,7 +103,8 @@
try (Connection conn = getConnection(props)) {
try (Connection conn2 = getConnection(props)) {
- HashMap<String, ArrayList<String>> expected = testBaseTableWithIndex_BaseTableChange(conn, conn2, schemaName, tableName, indexName);
+ HashMap<String, ArrayList<String>> expected = testBaseTableWithIndex_BaseTableChange(conn, conn2, schemaName, tableName,
+ indexName, false, createChildAfterRename);
// We have to rebuild index for this to work
IndexToolIT.runIndexTool(true, false, schemaName, tableName, indexName);
@@ -213,12 +130,12 @@
conn2.createStatement().execute("DROP TABLE " + fullTableName);
// check that the physical data table is dropped
- Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- assertEquals(false, admin.tableExists(TableName.valueOf(SchemaUtil.getTableName(schemaName,NEW_TABLE_PREFIX + tableName))));
+ try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+ assertEquals(false, admin.tableExists(TableName.valueOf(SchemaUtil.getTableName(schemaName, NEW_TABLE_PREFIX + tableName))));
- // check that index is dropped
- assertEquals(false, admin.tableExists(TableName.valueOf(fullIndexName)));
-
+ // check that index is dropped
+ assertEquals(false, admin.tableExists(TableName.valueOf(fullIndexName)));
+ }
}
}
}
@@ -231,7 +148,7 @@
try (Connection conn = getConnection(props)) {
try (Connection conn2 = getConnection(props)) {
- testBaseTableWithIndex_BaseTableChange(conn, conn2, schemaName, tableName, indexName);
+ testBaseTableWithIndex_BaseTableChange(conn, conn2, schemaName, tableName, indexName, false, createChildAfterRename);
List<Job>
completedJobs =
@@ -242,7 +159,7 @@
assertTrue(job.isSuccessful());
Counters counters = job.getCounters();
- if (createChildAfterTransform) {
+ if (createChildAfterRename) {
assertEquals(3, counters.findCounter(VALID_ROW_COUNT).getValue());
assertEquals(0, counters.findCounter(INVALID_ROW_COUNT).getValue());
} else {
@@ -254,49 +171,6 @@
}
}
- private HashMap<String, ArrayList<String>> test_IndexTableChange(Connection conn, Connection conn2, String schemaName, String tableName, String indexName, byte[] verifiedBytes) throws Exception {
- String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
- String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
- conn.setAutoCommit(true);
- createTable(conn, fullTableName);
- createIndexOnTable(conn, fullTableName, indexName);
- HashMap<String, ArrayList<String>> expected = populateTable(conn, fullTableName, 1, 2);
-
- // Create another hbase table for index and add 1 more row
- String newTableName = "NEW_IDXTBL_" + generateUniqueName();
- String fullNewTableName = SchemaUtil.getTableName(schemaName, newTableName);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
- .getAdmin()) {
- String snapshotName = new StringBuilder(indexName).append("-Snapshot").toString();
- admin.snapshot(snapshotName, TableName.valueOf(fullIndexName));
- admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
-
- try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableName))) {
- Put
- put =
- new Put(ByteUtil.concat(Bytes.toBytes("V13"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("PK3")));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
- verifiedBytes);
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("0:V2"),
- PInteger.INSTANCE.toBytes(3));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("0:V3"),
- PInteger.INSTANCE.toBytes(4));
- htable.put(put);
- expected.put("PK3", Lists.newArrayList("PK3", "V13", "3", "4"));
- }
- }
-
- // Query to cache on the second connection
- String selectTable1 = "SELECT * FROM " + fullIndexName;
- ResultSet rs1 = conn2.createStatement().executeQuery(selectTable1);
- assertTrue(rs1.next());
-
- // Rename table to point to the new hbase table
- renameAndDropPhysicalTable(conn, "NULL", schemaName, indexName, newTableName);
-
- return expected;
- }
-
@Test
public void testUpdatePhysicalIndexTableName() throws Exception {
String schemaName = "S_" + generateUniqueName();
@@ -306,7 +180,7 @@
String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
try (Connection conn = getConnection(props)) {
try (Connection conn2 = getConnection(props)) {
- HashMap<String, ArrayList<String>> expected = test_IndexTableChange(conn, conn2, schemaName, tableName, indexName, IndexRegionObserver.VERIFIED_BYTES);
+ HashMap<String, ArrayList<String>> expected = test_IndexTableChange(conn, conn2, schemaName, tableName, indexName, IndexRegionObserver.VERIFIED_BYTES, false);
validateIndex(conn, fullIndexName, false, expected);
validateIndex(conn2, fullIndexName, false, expected);
@@ -314,11 +188,11 @@
// create another index and drop the first index and validate the second one
String indexName2 = "IDX2_" + generateUniqueName();
String fullIndexName2 = SchemaUtil.getTableName(schemaName, indexName2);
- if (createChildAfterTransform) {
+ if (createChildAfterRename) {
createIndexOnTable(conn2, fullTableName, indexName2);
}
dropIndex(conn2, fullTableName, indexName);
- if (!createChildAfterTransform) {
+ if (!createChildAfterRename) {
createIndexOnTable(conn2, fullTableName, indexName2);
}
// The new index doesn't have the new row
@@ -338,7 +212,7 @@
String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
try (Connection conn = getConnection(props)) {
try (Connection conn2 = getConnection(props)) {
- test_IndexTableChange(conn, conn2, schemaName, tableName, indexName, IndexRegionObserver.VERIFIED_BYTES);
+ test_IndexTableChange(conn, conn2, schemaName, tableName, indexName, IndexRegionObserver.VERIFIED_BYTES, false);
List<Job>
completedJobs =
IndexScrutinyToolBaseIT.runScrutinyTool(schemaName, tableName, indexName, 1L,
@@ -356,7 +230,7 @@
// Try with unverified bytes
String tableName2 = "TBL_" + generateUniqueName();
String indexName2 = "IDX_" + generateUniqueName();
- test_IndexTableChange(conn, conn2, schemaName, tableName2, indexName2, IndexRegionObserver.UNVERIFIED_BYTES);
+ test_IndexTableChange(conn, conn2, schemaName, tableName2, indexName2, IndexRegionObserver.UNVERIFIED_BYTES, false);
completedJobs =
IndexScrutinyToolBaseIT.runScrutinyTool(schemaName, tableName2, indexName2, 1L,
@@ -375,201 +249,6 @@
}
}
- private HashMap<String, ArrayList<String>> testWithViewsAndIndex_BaseTableChange(Connection conn, Connection conn2, String schemaName, String tableName, String viewName1, String v1_indexName1, String v1_indexName2, String viewName2, String v2_indexName1) throws Exception {
- conn.setAutoCommit(true);
- String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
- String fullViewName1 = SchemaUtil.getTableName(schemaName, viewName1);
- String fullViewName2 = SchemaUtil.getTableName(schemaName, viewName2);
- createTable(conn, fullTableName);
- HashMap<String, ArrayList<String>> expected = new HashMap<>();
- if (!createChildAfterTransform) {
- createViewAndIndex(conn, schemaName, tableName, viewName1, v1_indexName1);
- createViewAndIndex(conn, schemaName, tableName, viewName1, v1_indexName2);
- createViewAndIndex(conn, schemaName, tableName, viewName2, v2_indexName1);
- expected.putAll(populateView(conn, fullViewName1, 1,2));
- expected.putAll(populateView(conn, fullViewName2, 10,2));
- }
-
- // Create another hbase table and add 1 more row
- String newTableName = "NEW_TBL_" + generateUniqueName();
- String fullNewTableName = SchemaUtil.getTableName(schemaName, newTableName);
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices()
- .getAdmin()) {
- String snapshotName = new StringBuilder(fullTableName).append("-Snapshot").toString();
- admin.snapshot(snapshotName, TableName.valueOf(fullTableName));
- admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewTableName));
-
- try (HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(fullNewTableName))) {
- Put put = new Put(ByteUtil.concat(Bytes.toBytes("PK3")));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
- QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"),
- Bytes.toBytes("V13"));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V2"),
- PInteger.INSTANCE.toBytes(3));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V3"),
- PInteger.INSTANCE.toBytes(4));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("VIEW_COL1"),
- Bytes.toBytes("VIEW_COL1_3"));
- put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("VIEW_COL2"),
- Bytes.toBytes("VIEW_COL2_3"));
- htable.put(put);
- expected.put("PK3", Lists.newArrayList("PK3", "V13", "3", "4", "VIEW_COL1_3", "VIEW_COL2_3"));
- }
- }
-
- // Query to cache on the second connection
- String selectTable1 = "SELECT PK1, V1, V2, V3 FROM " + fullTableName + " ORDER BY PK1 DESC";
- ResultSet rs1 = conn2.createStatement().executeQuery(selectTable1);
- if (!createChildAfterTransform) {
- assertTrue(rs1.next());
- }
-
- // Rename table to point to hbase table
- renameAndDropPhysicalTable(conn, "NULL", schemaName, tableName, newTableName);
-
- conn.unwrap(PhoenixConnection.class).getQueryServices().clearCache();
- if (createChildAfterTransform) {
- createViewAndIndex(conn, schemaName, tableName, viewName1, v1_indexName1);
- createViewAndIndex(conn, schemaName, tableName, viewName1, v1_indexName2);
- createViewAndIndex(conn, schemaName, tableName, viewName2, v2_indexName1);
- expected.putAll(populateView(conn, fullViewName1, 1,2));
- expected.putAll(populateView(conn, fullViewName2, 10,2));
- }
-
- return expected;
- }
-
-
- private PhoenixTestBuilder.SchemaBuilder createGlobalViewAndTenantView() throws Exception {
- int numOfRows = 5;
- PhoenixTestBuilder.SchemaBuilder.TableOptions tableOptions = PhoenixTestBuilder.SchemaBuilder.TableOptions.withDefaults();
- tableOptions.getTableColumns().clear();
- tableOptions.getTableColumnTypes().clear();
- tableOptions.setTableProps(" MULTI_TENANT=true, COLUMN_ENCODED_BYTES=0 "+this.dataTableDdl);
-
- PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions globalViewOptions = PhoenixTestBuilder.SchemaBuilder.GlobalViewOptions.withDefaults();
-
- PhoenixTestBuilder.SchemaBuilder.GlobalViewIndexOptions globalViewIndexOptions =
- PhoenixTestBuilder.SchemaBuilder.GlobalViewIndexOptions.withDefaults();
- globalViewIndexOptions.setLocal(false);
-
- PhoenixTestBuilder.SchemaBuilder.TenantViewOptions tenantViewOptions = new PhoenixTestBuilder.SchemaBuilder.TenantViewOptions();
- tenantViewOptions.setTenantViewColumns(asList("ZID", "COL7", "COL8", "COL9"));
- tenantViewOptions.setTenantViewColumnTypes(asList("CHAR(15)", "VARCHAR", "VARCHAR", "VARCHAR"));
-
- PhoenixTestBuilder.SchemaBuilder.OtherOptions testCaseWhenAllCFMatchAndAllDefault = new PhoenixTestBuilder.SchemaBuilder.OtherOptions();
- testCaseWhenAllCFMatchAndAllDefault.setTestName("testCaseWhenAllCFMatchAndAllDefault");
- testCaseWhenAllCFMatchAndAllDefault
- .setTableCFs(Lists.newArrayList((String) null, null, null));
- testCaseWhenAllCFMatchAndAllDefault
- .setGlobalViewCFs(Lists.newArrayList((String) null, null, null));
- testCaseWhenAllCFMatchAndAllDefault
- .setTenantViewCFs(Lists.newArrayList((String) null, null, null, null));
-
- // Define the test schema.
- PhoenixTestBuilder.SchemaBuilder schemaBuilder = null;
- if (!createChildAfterTransform) {
- schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
- schemaBuilder.withTableOptions(tableOptions).withGlobalViewOptions(globalViewOptions)
- .withGlobalViewIndexOptions(globalViewIndexOptions)
- .withTenantViewOptions(tenantViewOptions)
- .withOtherOptions(testCaseWhenAllCFMatchAndAllDefault).build();
- } else {
- schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
- schemaBuilder.withTableOptions(tableOptions).build();
- }
-
- PTable table = schemaBuilder.getBaseTable();
- String schemaName = table.getSchemaName().getString();
- String tableName = table.getTableName().getString();
- String newBaseTableName = "NEW_TBL_" + tableName;
- String fullNewBaseTableName = SchemaUtil.getTableName(schemaName, newBaseTableName);
- String fullTableName = table.getName().getString();
-
- try (Connection conn = getConnection(props)) {
-
- try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
- String snapshotName = new StringBuilder(fullTableName).append("-Snapshot").toString();
- admin.snapshot(snapshotName, TableName.valueOf(fullTableName));
- admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(fullNewBaseTableName));
- }
-
- renameAndDropPhysicalTable(conn, null, schemaName, tableName, newBaseTableName);
- }
-
- // TODO: this still creates a new table.
- if (createChildAfterTransform) {
- schemaBuilder = new PhoenixTestBuilder.SchemaBuilder(getUrl());
- schemaBuilder.withDataOptions(schemaBuilder.getDataOptions())
- .withTableOptions(tableOptions)
- .withGlobalViewOptions(globalViewOptions)
- .withGlobalViewIndexOptions(globalViewIndexOptions)
- .withTenantViewOptions(tenantViewOptions)
- .withOtherOptions(testCaseWhenAllCFMatchAndAllDefault).build();
- }
-
- // Define the test data.
- PhoenixTestBuilder.DataSupplier dataSupplier = new PhoenixTestBuilder.DataSupplier() {
-
- @Override public List<Object> getValues(int rowIndex) {
- Random rnd = new Random();
- String id = String.format(ViewTTLIT.ID_FMT, rowIndex);
- String zid = String.format(ViewTTLIT.ZID_FMT, rowIndex);
- String col4 = String.format(ViewTTLIT.COL4_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
- String col5 = String.format(ViewTTLIT.COL5_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
- String col6 = String.format(ViewTTLIT.COL6_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
- String col7 = String.format(ViewTTLIT.COL7_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
- String col8 = String.format(ViewTTLIT.COL8_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
- String col9 = String.format(ViewTTLIT.COL9_FMT, rowIndex + rnd.nextInt(MAX_ROWS));
-
- return Lists.newArrayList(
- new Object[] { id, zid, col4, col5, col6, col7, col8, col9 });
- }
- };
-
- // Create a test data reader/writer for the above schema.
- PhoenixTestBuilder.DataWriter dataWriter = new PhoenixTestBuilder.BasicDataWriter();
- List<String> columns =
- Lists.newArrayList("ID", "ZID", "COL4", "COL5", "COL6", "COL7", "COL8", "COL9");
- List<String> rowKeyColumns = Lists.newArrayList("ID", "ZID");
-
- String tenantConnectUrl =
- getUrl() + ';' + TENANT_ID_ATTRIB + '=' + schemaBuilder.getDataOptions().getTenantId();
-
- try (Connection tenantConnection = DriverManager.getConnection(tenantConnectUrl)) {
- tenantConnection.setAutoCommit(true);
- dataWriter.setConnection(tenantConnection);
- dataWriter.setDataSupplier(dataSupplier);
- dataWriter.setUpsertColumns(columns);
- dataWriter.setRowKeyColumns(rowKeyColumns);
- dataWriter.setTargetEntity(schemaBuilder.getEntityTenantViewName());
- dataWriter.upsertRows(1, numOfRows);
- com.google.common.collect.Table<String, String, Object> upsertedData = dataWriter.getDataTable();;
-
- PhoenixTestBuilder.DataReader dataReader = new PhoenixTestBuilder.BasicDataReader();
- dataReader.setValidationColumns(columns);
- dataReader.setRowKeyColumns(rowKeyColumns);
- dataReader.setDML(String.format("SELECT %s from %s", Joiner.on(",").join(columns),
- schemaBuilder.getEntityTenantViewName()));
- dataReader.setTargetEntity(schemaBuilder.getEntityTenantViewName());
- dataReader.setConnection(tenantConnection);
- dataReader.readRows();
- com.google.common.collect.Table<String, String, Object> fetchedData
- = dataReader.getDataTable();
- assertNotNull("Fetched data should not be null", fetchedData);
- ViewTTLIT.verifyRowsBeforeTTLExpiration(upsertedData, fetchedData);
-
- }
- return schemaBuilder;
- }
-
- @Test
- public void testWith2LevelViewsBaseTablePhysicalNameChange() throws Exception {
- // TODO: use namespace in one of the cases
- PhoenixTestBuilder.SchemaBuilder schemaBuilder = createGlobalViewAndTenantView();
- }
-
@Test
public void testUpdatePhysicalTableNameWithViews() throws Exception {
try (Connection conn = getConnection(props)) {
@@ -587,7 +266,8 @@
String fullView2Name = SchemaUtil.getTableName(schemaName, view2Name);
String fullView2IndexName1 = SchemaUtil.getTableName(schemaName, view2IndexName1);
- HashMap<String, ArrayList<String>> expected = testWithViewsAndIndex_BaseTableChange(conn, conn2, schemaName, tableName, view1Name, view1IndexName1, view1IndexName2, view2Name, view2IndexName1);
+ HashMap<String, ArrayList<String>> expected = testWithViewsAndIndex_BaseTableChange(conn, conn2, null,
+ schemaName, tableName, view1Name, view1IndexName1, view1IndexName2, view2Name, view2IndexName1, false, createChildAfterRename);
// We have to rebuild index for this to work
IndexToolIT.runIndexTool(true, false, schemaName, view1Name, view1IndexName1);
@@ -651,8 +331,8 @@
String view2Name = "VW2_" + generateUniqueName();
String view2IndexName1 = "VW2IDX1_" + generateUniqueName();
- testWithViewsAndIndex_BaseTableChange(conn, conn2,schemaName, tableName, view1Name,
- view1IndexName1, view1IndexName2, view2Name, view2IndexName1);
+ testWithViewsAndIndex_BaseTableChange(conn, conn2, null,schemaName, tableName, view1Name,
+ view1IndexName1, view1IndexName2, view2Name, view2IndexName1, false, createChildAfterRename);
List<Job>
completedJobs =
@@ -663,7 +343,7 @@
assertTrue(job.isSuccessful());
Counters counters = job.getCounters();
- if (createChildAfterTransform) {
+ if (createChildAfterRename) {
assertEquals(3, counters.findCounter(VALID_ROW_COUNT).getValue());
assertEquals(2, counters.findCounter(INVALID_ROW_COUNT).getValue());
} else {
@@ -676,139 +356,89 @@
}
}
- private void createTable(Connection conn, String tableName) throws Exception {
- String createTableSql = "CREATE TABLE " + tableName + " (PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 INTEGER, V3 INTEGER "
- + "CONSTRAINT NAME_PK PRIMARY KEY(PK1)) COLUMN_ENCODED_BYTES=0 " + dataTableDdl;
- LOGGER.debug(createTableSql);
- conn.createStatement().execute(createTableSql);
- }
-
- private void createIndexOnTable(Connection conn, String tableName, String indexName)
- throws SQLException {
- String createIndexSql = "CREATE INDEX " + indexName + " ON " + tableName + " (V1) INCLUDE (V2, V3) ";
- LOGGER.debug(createIndexSql);
- conn.createStatement().execute(createIndexSql);
- }
-
- private void dropIndex(Connection conn, String tableName, String indexName)
- throws SQLException {
- String sql = "DROP INDEX " + indexName + " ON " + tableName ;
- conn.createStatement().execute(sql);
- }
-
- private HashMap<String, ArrayList<String>> populateTable(Connection conn, String tableName, int startnum, int numOfRows)
- throws SQLException {
- String upsert = "UPSERT INTO " + tableName + " (PK1, V1, V2, V3) VALUES (?,?,?,?)";
- PreparedStatement upsertStmt = conn.prepareStatement(upsert);
- HashMap<String, ArrayList<String>> result = new HashMap<>();
- for (int i=startnum; i < startnum + numOfRows; i++) {
- ArrayList<String> row = new ArrayList<>();
- upsertStmt.setString(1, "PK" + i);
- row.add("PK" + i);
- upsertStmt.setString(2, "V1" + i);
- row.add("V1" + i);
- upsertStmt.setInt(3, i);
- row.add(String.valueOf(i));
- upsertStmt.setInt(4, i + 1);
- row.add(String.valueOf(i + 1));
- upsertStmt.executeUpdate();
- result.put("PK" + i, row);
- }
- return result;
- }
-
- private HashMap<String, ArrayList<String>> populateView(Connection conn, String viewName, int startNum, int numOfRows) throws SQLException {
- String upsert = "UPSERT INTO " + viewName + " (PK1, V1, V2, V3, VIEW_COL1, VIEW_COL2) VALUES (?,?,?,?,?,?)";
- PreparedStatement upsertStmt = conn.prepareStatement(upsert);
- HashMap<String, ArrayList<String>> result = new HashMap<>();
- for (int i=startNum; i < startNum + numOfRows; i++) {
- ArrayList<String> row = new ArrayList<>();
- upsertStmt.setString(1, "PK"+i);
- row.add("PK"+i);
- upsertStmt.setString(2, "V1"+i);
- row.add("V1"+i);
- upsertStmt.setInt(3, i);
- row.add(String.valueOf(i));
- upsertStmt.setInt(4, i+1);
- row.add(String.valueOf(i+1));
- upsertStmt.setString(5, "VIEW_COL1_"+i);
- row.add("VIEW_COL1_"+i);
- upsertStmt.setString(6, "VIEW_COL2_"+i);
- row.add("VIEW_COL2_"+i);
- upsertStmt.executeUpdate();
- result.put("PK"+i, row);
- }
- return result;
- }
-
- private void createViewAndIndex(Connection conn, String schemaName, String tableName, String viewName, String viewIndexName)
- throws SQLException {
+ @Test
+ public void testWith2LevelViewsBaseTablePhysicalNameChange() throws Exception {
+ String schemaName = "S_" + generateUniqueName();
+ String tableName = "TBL_" + generateUniqueName();
+ String view1Name = "VW1_" + generateUniqueName();
+ String level2ViewName = "VW1_CH1_" + generateUniqueName();
+ String fullLevel2ViewName = SchemaUtil.getTableName(schemaName, level2ViewName);
+ String view1IndexName1 = "VW1IDX1_" + generateUniqueName();
+ String level2ViewIndexName = "VW1_CH1IDX_" + generateUniqueName();
+ String fullView1Name = SchemaUtil.getTableName(schemaName, view1Name);
String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
- String fullViewName = SchemaUtil.getTableName(schemaName, viewName);
- String
- view1DDL =
- "CREATE VIEW IF NOT EXISTS " + fullViewName + " ( VIEW_COL1 VARCHAR, VIEW_COL2 VARCHAR) AS SELECT * FROM "
- + fullTableName;
- conn.createStatement().execute(view1DDL);
- String indexDDL = "CREATE INDEX IF NOT EXISTS " + viewIndexName + " ON " + fullViewName + " (V1) include (V2, V3, VIEW_COL2) ";
- conn.createStatement().execute(indexDDL);
- conn.commit();
- }
+ try (Connection conn = getConnection(props)) {
+ try (Connection conn2 = getConnection(props)) {
+ conn.setAutoCommit(true);
+ conn2.setAutoCommit(true);
+ HashMap<String, ArrayList<String>> expected = new HashMap<>();
+ createTable(conn, fullTableName);
+ createViewAndIndex(conn2, schemaName, tableName, view1Name, view1IndexName1);
+ createViewAndIndex(conn2, schemaName, tableName, view1Name, view1IndexName1);
+ expected.putAll(populateView(conn, fullView1Name, 1, 2));
- private void validateTable(Connection connection, String tableName) throws SQLException {
- String selectTable = "SELECT PK1, V1, V2, V3 FROM " + tableName + " ORDER BY PK1 DESC";
- ResultSet rs = connection.createStatement().executeQuery(selectTable);
- assertTrue(rs.next());
- assertEquals("PK3", rs.getString(1));
- assertEquals("V13", rs.getString(2));
- assertEquals(3, rs.getInt(3));
- assertEquals(4, rs.getInt(4));
- assertTrue(rs.next());
- assertEquals("PK2", rs.getString(1));
- assertEquals("V12", rs.getString(2));
- assertEquals(2, rs.getInt(3));
- assertEquals(3, rs.getInt(4));
- assertTrue(rs.next());
- assertEquals("PK1", rs.getString(1));
- assertEquals("V11", rs.getString(2));
- assertEquals(1, rs.getInt(3));
- assertEquals(2, rs.getInt(4));
- }
+ String ddl = "CREATE VIEW " + fullLevel2ViewName + "(chv2 VARCHAR) AS SELECT * FROM " + fullView1Name;
+ String
+ indexDdl =
+ "CREATE INDEX " + level2ViewIndexName + " ON " + fullLevel2ViewName + " (chv2) INCLUDE (v1, VIEW_COL1)";
+ if (!createChildAfterRename) {
+ conn.createStatement().execute(ddl);
+ conn.createStatement().execute(indexDdl);
+ }
- private void validateIndex(Connection connection, String tableName, boolean isViewIndex, HashMap<String, ArrayList<String>> expected) throws SQLException {
- String selectTable = "SELECT * FROM " + tableName;
- ResultSet rs = connection.createStatement().executeQuery(selectTable);
- int cnt = 0;
- while (rs.next()) {
- String pk = rs.getString(2);
- assertTrue(expected.containsKey(pk));
- ArrayList<String> row = expected.get(pk);
- assertEquals(row.get(1), rs.getString(1));
- assertEquals(row.get(2), rs.getString(3));
- assertEquals(row.get(3), rs.getString(4));
- if (isViewIndex) {
- assertEquals(row.get(5), rs.getString(5));
+ String newTableName = NEW_TABLE_PREFIX + generateUniqueName();
+ String fullTableHbaseName = SchemaUtil.getTableName(schemaName, tableName);
+ createAndPointToNewPhysicalTable(conn, fullTableHbaseName, newTableName, false);
+
+ conn.unwrap(PhoenixConnection.class).getQueryServices().clearCache();
+ if (createChildAfterRename) {
+ conn.createStatement().execute(ddl);
+ conn.createStatement().execute(indexDdl);
+ }
+
+ // Add new row to child view
+ String upsert = "UPSERT INTO " + fullLevel2ViewName + " (PK1, V1, VIEW_COL1, CHV2) VALUES (?,?,?,?)";
+ PreparedStatement upsertStmt = conn.prepareStatement(upsert);
+ ArrayList<String> row = new ArrayList<>();
+ upsertStmt.setString(1, "PK10");
+ upsertStmt.setString(2, "V10");
+ upsertStmt.setString(3, "VIEW_COL1_10");
+ upsertStmt.setString(4, "CHV210");
+ upsertStmt.executeUpdate();
+
+ String selectFromL2View = "SELECT /*+ NO_INDEX */ * FROM " + fullLevel2ViewName + " WHERE chv2='CHV210'";
+ ResultSet
+ rs =
+ conn2.createStatement().executeQuery(selectFromL2View);
+ assertEquals(true, rs.next());
+ assertEquals(false, rs.next());
+
+ String indexSelect = "SELECT chv2, V1, VIEW_COL1 FROM " + fullLevel2ViewName + " WHERE chv2='CHV210'";
+ rs =
+ conn2.createStatement().executeQuery("EXPLAIN " + indexSelect);
+ assertEquals(true, QueryUtil.getExplainPlan(rs).contains(VIEW_INDEX_TABLE_PREFIX));
+ rs = conn2.createStatement().executeQuery(indexSelect);
+ assertEquals(true, rs.next());
+ assertEquals(false, rs.next());
+
+ // Drop row and check
+ conn2.createStatement().execute("DELETE FROM " + fullLevel2ViewName + " WHERE chv2='CHV210'");
+ rs = conn2.createStatement().executeQuery(indexSelect);
+ assertEquals(false, rs.next());
}
- cnt++;
}
- assertEquals(cnt, expected.size());
}
- public static void renameAndDropPhysicalTable(Connection conn, String tenantId, String schema, String tableName, String physicalName) throws Exception {
- String
- changeName =
- String.format(
- "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, PHYSICAL_TABLE_NAME) VALUES (%s, '%s', '%s', NULL, NULL, '%s')",
- tenantId, schema, tableName, physicalName);
- conn.createStatement().execute(changeName);
- conn.commit();
-
- String fullTableName = SchemaUtil.getTableName(schema, tableName);
- Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
- TableName hTableName = TableName.valueOf(fullTableName);
- admin.disableTable(hTableName);
- admin.deleteTable(hTableName);
- conn.unwrap(PhoenixConnection.class).getQueryServices()
- .clearCache();
+ @Test
+ public void testHashJoin() throws Exception {
+ if (immutable || createChildAfterRename) {
+ return;
+ }
+ Object[] arr = HashJoinGlobalIndexIT.data().toArray();
+ String[] indexDDL = ((String[][])arr[0])[0];
+ String[] plans = ((String[][])arr[0])[1];
+ HashJoinGlobalIndexIT hjgit = new HashJoinGlobalIndexIT(indexDDL, plans);
+ hjgit.createSchema();
+ hjgit.testInnerJoin(false);
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinIT.java
index 56ec7f2..693c400 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinIT.java
@@ -37,10 +37,12 @@
import java.util.Properties;
import org.apache.phoenix.cache.ServerCacheClient;
+import org.apache.phoenix.end2end.LogicalTableNameBaseIT;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.SchemaUtil;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -52,7 +54,63 @@
public HashJoinIT(String[] indexDDL, String[] plans) {
super(indexDDL, plans);
}
-
+
+ public void testInnerJoin(boolean renamePhysicalTable) throws Exception {
+ Connection conn = getConnection();
+ String tableName1 = getTableName(conn, JOIN_ITEM_TABLE_FULL_NAME);
+ String tableName2 = getTableName(conn, JOIN_SUPPLIER_TABLE_FULL_NAME);
+ String fullNameRealItemTable =getTableNameMap().get(JOIN_ITEM_TABLE_FULL_NAME);
+ String fullNameSupplierTable =getTableNameMap().get(JOIN_SUPPLIER_TABLE_FULL_NAME);
+ if (renamePhysicalTable) {
+ LogicalTableNameBaseIT.createAndPointToNewPhysicalTable(conn, fullNameRealItemTable, false);
+ LogicalTableNameBaseIT.createAndPointToNewPhysicalTable(conn, fullNameSupplierTable, false);
+ }
+ String query = "SELECT item.\"item_id\", item.name, supp.\"supplier_id\", supp.name, next value for " + seqName + " FROM " + tableName1 + " item INNER JOIN " + tableName2 + " supp ON item.\"supplier_id\" = supp.\"supplier_id\"";
+ try {
+ PreparedStatement statement = conn.prepareStatement(query);
+ ResultSet rs = statement.executeQuery();
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "0000000001");
+ assertEquals(rs.getString(2), "T1");
+ assertEquals(rs.getString(3), "0000000001");
+ assertEquals(rs.getString(4), "S1");
+ assertEquals(1, rs.getInt(5));
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "0000000002");
+ assertEquals(rs.getString(2), "T2");
+ assertEquals(rs.getString(3), "0000000001");
+ assertEquals(rs.getString(4), "S1");
+ assertEquals(2, rs.getInt(5));
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "0000000003");
+ assertEquals(rs.getString(2), "T3");
+ assertEquals(rs.getString(3), "0000000002");
+ assertEquals(rs.getString(4), "S2");
+ assertEquals(3, rs.getInt(5));
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "0000000004");
+ assertEquals(rs.getString(2), "T4");
+ assertEquals(rs.getString(3), "0000000002");
+ assertEquals(rs.getString(4), "S2");
+ assertEquals(4, rs.getInt(5));
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "0000000005");
+ assertEquals(rs.getString(2), "T5");
+ assertEquals(rs.getString(3), "0000000005");
+ assertEquals(rs.getString(4), "S5");
+ assertEquals(5, rs.getInt(5));
+ assertTrue (rs.next());
+ assertEquals(rs.getString(1), "0000000006");
+ assertEquals(rs.getString(2), "T6");
+ assertEquals(rs.getString(3), "0000000006");
+ assertEquals(rs.getString(4), "S6");
+ assertEquals(6, rs.getInt(5));
+
+ assertFalse(rs.next());
+ } finally {
+ conn.close();
+ }
+ }
@Test
public void testDefaultJoin() throws Exception {
@@ -102,54 +160,7 @@
@Test
public void testInnerJoin() throws Exception {
- Connection conn = getConnection();
- String tableName1 = getTableName(conn, JOIN_ITEM_TABLE_FULL_NAME);
- String tableName2 = getTableName(conn, JOIN_SUPPLIER_TABLE_FULL_NAME);
- String query = "SELECT item.\"item_id\", item.name, supp.\"supplier_id\", supp.name, next value for " + seqName + " FROM " + tableName1 + " item INNER JOIN " + tableName2 + " supp ON item.\"supplier_id\" = supp.\"supplier_id\"";
- try {
- PreparedStatement statement = conn.prepareStatement(query);
- ResultSet rs = statement.executeQuery();
- assertTrue (rs.next());
- assertEquals(rs.getString(1), "0000000001");
- assertEquals(rs.getString(2), "T1");
- assertEquals(rs.getString(3), "0000000001");
- assertEquals(rs.getString(4), "S1");
- assertEquals(1, rs.getInt(5));
- assertTrue (rs.next());
- assertEquals(rs.getString(1), "0000000002");
- assertEquals(rs.getString(2), "T2");
- assertEquals(rs.getString(3), "0000000001");
- assertEquals(rs.getString(4), "S1");
- assertEquals(2, rs.getInt(5));
- assertTrue (rs.next());
- assertEquals(rs.getString(1), "0000000003");
- assertEquals(rs.getString(2), "T3");
- assertEquals(rs.getString(3), "0000000002");
- assertEquals(rs.getString(4), "S2");
- assertEquals(3, rs.getInt(5));
- assertTrue (rs.next());
- assertEquals(rs.getString(1), "0000000004");
- assertEquals(rs.getString(2), "T4");
- assertEquals(rs.getString(3), "0000000002");
- assertEquals(rs.getString(4), "S2");
- assertEquals(4, rs.getInt(5));
- assertTrue (rs.next());
- assertEquals(rs.getString(1), "0000000005");
- assertEquals(rs.getString(2), "T5");
- assertEquals(rs.getString(3), "0000000005");
- assertEquals(rs.getString(4), "S5");
- assertEquals(5, rs.getInt(5));
- assertTrue (rs.next());
- assertEquals(rs.getString(1), "0000000006");
- assertEquals(rs.getString(2), "T6");
- assertEquals(rs.getString(3), "0000000006");
- assertEquals(rs.getString(4), "S6");
- assertEquals(6, rs.getInt(5));
-
- assertFalse(rs.next());
- } finally {
- conn.close();
- }
+ testInnerJoin(false);
}
@Test
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 05f2f93..7fb9cdb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -125,7 +125,9 @@
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.ipc.RpcServer.Call;
import org.apache.hadoop.hbase.ipc.RpcUtil;
@@ -973,6 +975,25 @@
return table;
}
+ private PName getPhysicalTableName(Region region, byte[] tenantId, byte[] schema, byte[] table, long timestamp) throws IOException {
+ byte[] key = SchemaUtil.getTableKey(tenantId, schema, table);
+ Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP,
+ timestamp);
+ scan.addColumn(TABLE_FAMILY_BYTES, PHYSICAL_TABLE_NAME_BYTES);
+ try (RegionScanner scanner = region.getScanner(scan)) {
+ List<Cell> results = Lists.newArrayList();
+ scanner.next(results);
+ Cell physicalTableNameKv = null;
+ if (results.size() > 0) {
+ physicalTableNameKv = results.get(0);
+ }
+ PName physicalTableName =
+ physicalTableNameKv != null ? newPName(physicalTableNameKv.getValueArray(),
+ physicalTableNameKv.getValueOffset(), physicalTableNameKv.getValueLength()) : null;
+ return physicalTableName;
+ }
+ }
+
private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableTimeStamp,
int clientVersion)
throws IOException, SQLException {
@@ -1246,7 +1267,17 @@
}
if (parentTable == null) {
- physicalTables.add(famName);
+ if (indexType == IndexType.LOCAL) {
+ PName tablePhysicalName = getPhysicalTableName(env.getRegion(),null, SchemaUtil.getSchemaNameFromFullName(famName.getBytes()).getBytes(),
+ SchemaUtil.getTableNameFromFullName(famName.getBytes()).getBytes(), clientTimeStamp);
+ if (tablePhysicalName == null) {
+ physicalTables.add(famName);
+ } else {
+ physicalTables.add(SchemaUtil.getPhysicalHBaseTableName(schemaName, tablePhysicalName, isNamespaceMapped));
+ }
+ } else {
+ physicalTables.add(famName);
+ }
// If this is a view index, then one of the link is IDX_VW -> _IDX_ PhysicalTable link. Since famName is _IDX_ and we can't get this table hence it is null, we need to use actual view name
parentLogicalName = (tableType == INDEX ? SchemaUtil.getTableName(parentSchemaName, parentTableName) : famName);
} else {