PHOENIX-6693 Remove HBase 2.1 and HBase 2.2 support from Phoenix
diff --git a/BUILDING.md b/BUILDING.md
index e7a1f7d..c50634d 100644
--- a/BUILDING.md
+++ b/BUILDING.md
@@ -29,18 +29,18 @@
HBase 2 and Hadoop 3
--------------------
-Phoenix 5.x requires Hadoop 3. While HBase 2.x is compatible with Hadoop 3, the public Maven Hbase
+Phoenix 5.x requires Hadoop 3. While HBase 2.x is compatible with Hadoop 3, the public Maven HBase
artifacts are built with Hadoop 2, and are not.
For this reason, when building Phoenix, you need to rebuild
HBase with Hadoop 3, and install it to the local maven repo of the build host.
-`$ wget https://downloads.apache.org/hbase/2.2.5/hbase-2.2.5-src.tar.gz`
-`$ tar xfvz hbase-2.2.5-src.tar.gz`
-`$ cd hbase-2.2.5`
+`$ wget https://downloads.apache.org/hbase/2.4.10/hbase-2.4.10-src.tar.gz`
+`$ tar xfvz hbase-2.4.10-src.tar.gz`
+`$ cd hbase-2.4.10`
`$ mvn install -Dhadoop.profile=3.0 -DskipTests`
-Replace 2.2.5 with the actual Hbase version you are using in the Phoenix build.
+Replace 2.4.10 with the actual HBase version you are using in the Phoenix build.
You can find the exact HBase version each phoenix HBase profile uses by checking <hbase.version>
in the corresponding profile section at the end of phoenix/pom.xml, or you can specify the HBase
@@ -81,8 +81,8 @@
setting the `hbase.version` system property.
* `mvn clean install` will use the latest known patch release of the the latest supported HBase 2 minor relese
- * `mvn clean install -Dhbase.profile=2.1` will use the latest known patch release of HBase 2.1
- * `mvn clean install -Dhbase.profile=2.1 -Dhbase.version=2.1.7` will build with HBase 2.1.7
+ * `mvn clean install -Dhbase.profile=2.4` will use the latest known patch release of HBase 2.4
+ * `mvn clean install -Dhbase.profile=2.4 -Dhbase.version=2.4.5` will build with HBase 2.4.5
Phoenix verifies the specified `hbase.profile` and `hbase.version` properties, and will reject
combinations that are known not to work. You may disable this verification by adding
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index ba772a4..1e77bbe 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -40,7 +40,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
- <version>${maven-enforcer-plugin.version}</version>
<configuration>
<rules>
<evaluateBeanshell>
@@ -57,13 +56,7 @@
hbasePatch = Integer.parseInt(versionMatcher.group(3));
hbaseMajor == 2 && (
- ("${hbase.compat.version}".equals("2.1.6")
- && hbaseMinor == 1
- && hbasePatch >=6)
- || ("${hbase.compat.version}".equals("2.2.5")
- && hbaseMinor == 2
- && hbasePatch >=5)
- || ("${hbase.compat.version}".equals("2.3.0")
+ ("${hbase.compat.version}".equals("2.3.0")
&& hbaseMinor == 3
&& hbasePatch >=0)
|| ("${hbase.compat.version}".equals("2.4.0")
@@ -338,7 +331,6 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-zookeeper</artifactId>
</dependency>
-
<dependency>
<groupId>org.apache.hbase.thirdparty</groupId>
<artifactId>hbase-shaded-miscellaneous</artifactId>
@@ -546,6 +538,10 @@
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper-jute</artifactId>
+ </dependency>
+ <dependency>
<groupId>org.apache.thrift</groupId>
<artifactId>libthrift</artifactId>
</dependency>
@@ -581,82 +577,4 @@
</dependency>
</dependencies>
-
-<profiles>
- <profile>
- <id>phoenix-hbase-compat-2.3.0</id>
- <!-- keep dependency plugin happy -->
- <activation>
- <property>
- <name>hbase.profile</name>
- <value>2.3</value>
- </property>
- </activation>
- <properties>
- <hbase.profile>2.3</hbase.profile>
- </properties>
- <dependencies>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper-jute</artifactId>
- </dependency>
- </dependencies>
- </profile>
- <profile>
- <id>phoenix-hbase-compat-2.4.0</id>
- <!-- keep dependency plugin happy -->
- <activation>
- <property>
- <name>hbase.profile</name>
- <value>2.4.0</value>
- </property>
- </activation>
- <properties>
- <hbase.profile>2.4.0</hbase.profile>
- </properties>
- <dependencies>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper-jute</artifactId>
- </dependency>
- </dependencies>
- </profile>
- <profile>
- <id>phoenix-hbase-compat-2.4.1</id>
- <!-- keep dependency plugin happy -->
- <activation>
- <property>
- <name>hbase.profile</name>
- <value>2.4</value>
- </property>
- </activation>
- <properties>
- <hbase.profile>2.4</hbase.profile>
- </properties>
- <dependencies>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper-jute</artifactId>
- </dependency>
- </dependencies>
- </profile>
- <profile>
- <id>phoenix-hbase-compat-2.4.1-default</id>
- <!-- keep dependency plugin happy -->
- <activation>
- <property>
- <name>!hbase.profile</name>
- </property>
- </activation>
- <properties>
- <hbase.profile>2.4</hbase.profile>
- </properties>
- <dependencies>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper-jute</artifactId>
- </dependency>
- </dependencies>
- </profile>
-</profiles>
</project>
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index ed75413..cf1d943 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -65,6 +65,7 @@
import org.apache.phoenix.hbase.index.covered.CoveredColumn;
import org.apache.phoenix.hbase.index.covered.CoveredColumnIndexSpecifierBuilder;
import org.apache.phoenix.hbase.index.util.TestIndexManagementUtil;
+import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.ConfigUtil;
import org.junit.After;
@@ -157,7 +158,7 @@
@After
public void tearDown() throws Exception {
- boolean refCountLeaked = CompatUtil.isAnyStoreRefCountLeaked(
+ boolean refCountLeaked = BaseTest.isAnyStoreRefCountLeaked(
UTIL.getAdmin());
UTIL.shutdownMiniHBaseCluster();
UTIL.shutdownMiniDFSCluster();
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index ab08cec..995a292 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -60,6 +60,7 @@
import org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.MavenCoordinates;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.SystemTaskSplitPolicy;
@@ -125,7 +126,7 @@
@After
public synchronized void cleanUpAfterTest() throws Exception {
- boolean refCountLeaked = CompatUtil.isAnyStoreRefCountLeaked(hbaseTestUtil.getAdmin());
+ boolean refCountLeaked = BaseTest.isAnyStoreRefCountLeaked(hbaseTestUtil.getAdmin());
ConnectionFactory.shutdown();
try {
DriverManager.deregisterDriver(PhoenixDriver.INSTANCE);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java
index e7c7d88..fb4c7bf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
@@ -283,7 +283,7 @@
// connection is set to be the phoenix version timestamp
// (31 as of now: MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 / MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0)
// Hence, keeping value: 15
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Integer.toString(15));
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index 8154ba0..f093ea0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -27,7 +27,6 @@
import java.util.List;
import java.util.Properties;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
import org.apache.phoenix.util.PropertiesUtil;
import org.junit.runner.RunWith;
@@ -154,7 +153,7 @@
List<Object> testCases = Lists.newArrayList();
for (String indexDDL : INDEX_DDLS) {
for (boolean columnEncoded : new boolean[]{false, true}) {
- testCases.add(new Object[] { indexDDL, columnEncoded, !HbaseCompatCapabilities.isLookbackBeyondDeletesSupported()});
+ testCases.add(new Object[] { indexDDL, columnEncoded, true});
}
}
return testCases;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
index 48768ea..5171f2b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -29,7 +29,7 @@
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.mapreduce.index.IndexTool;
import org.apache.phoenix.query.QueryServices;
@@ -78,7 +78,7 @@
public static synchronized void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
props.put(QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, Long.toString(0));
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Integer.toString(MAX_LOOKBACK_AGE));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentUpsertsWithoutIndexedColsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentUpsertsWithoutIndexedColsIT.java
index 2e05ccf..f20ec32 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentUpsertsWithoutIndexedColsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentUpsertsWithoutIndexedColsIT.java
@@ -19,7 +19,7 @@
package org.apache.phoenix.end2end;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap;
@@ -60,7 +60,7 @@
private static final Map<String, String> PROPS = ImmutableMap.of(
QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB,
Long.toString(0),
- CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Integer.toString(1000000));
@BeforeClass
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index 0f143af..bea2d25 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -36,9 +36,9 @@
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.ExplainPlanAttributes;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.IndexRebuildRegionScanner;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
@@ -100,7 +100,7 @@
public static synchronized void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
- serverProps.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
+ serverProps.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
@@ -392,7 +392,7 @@
// Verify that the index table is not in the ACTIVE state
assertFalse(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
// Run the index MR job and verify that the index table rebuild fails
IndexToolIT.runIndexTool(false, schemaName, dataTableName,
indexTableName, null, -1, IndexTool.IndexVerifyType.AFTER);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexRepairRegionScannerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexRepairRegionScannerIT.java
index 5a0c998..78676c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexRepairRegionScannerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexRepairRegionScannerIT.java
@@ -31,8 +31,7 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.GlobalIndexRegionScanner;
import org.apache.phoenix.coprocessor.IndexRepairRegionScanner;
import org.apache.phoenix.hbase.index.IndexRegionObserver;
@@ -94,7 +93,6 @@
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
@Category(NeedsOwnMiniClusterTest.class)
@RunWith(Parameterized.class)
@@ -139,7 +137,7 @@
public static synchronized void doSetup() throws Exception {
// below settings are needed to enforce major compaction
Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(0));
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(0));
props.put(QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, Long.toString(0));
// to force multiple verification tasks to be spawned so that we can exercise the page splitting logic
props.put(GlobalIndexRegionScanner.INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY, Long.toString(2));
@@ -686,7 +684,6 @@
@Test
public void testFromIndexToolForIncrementalVerify() throws Exception {
- assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
final int NROWS = 4;
ManualEnvironmentEdge customEdge = new ManualEnvironmentEdge();
String schemaName = generateUniqueName();
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyWithMaxLookbackIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyWithMaxLookbackIT.java
index c6932a9..af03112 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyWithMaxLookbackIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyWithMaxLookbackIT.java
@@ -21,8 +21,7 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.mapreduce.index.IndexScrutinyMapper;
import org.apache.phoenix.mapreduce.index.IndexScrutinyTableOutput;
import org.apache.phoenix.mapreduce.index.IndexScrutinyTool;
@@ -35,7 +34,6 @@
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.util.TestUtil;
-import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -81,7 +79,7 @@
public static synchronized void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
props.put(QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, Long.toString(0));
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Integer.toString(MAX_LOOKBACK));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@@ -100,7 +98,6 @@
@Test
public void testScrutinyOnRowsBeyondMaxLookBack() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isLookbackBeyondDeletesSupported());
setupTables();
try {
upsertDataAndScrutinize(dataTableName, dataTableFullName, testClock);
@@ -113,7 +110,6 @@
@Test
public void testScrutinyOnRowsBeyondMaxLookback_viewIndex() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isLookbackBeyondDeletesSupported());
schema = "S"+generateUniqueName();
dataTableName = "T"+generateUniqueName();
dataTableFullName = SchemaUtil.getTableName(schema,dataTableName);
@@ -163,7 +159,6 @@
@Test
public void testScrutinyOnDeletedRowsBeyondMaxLookBack() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isLookbackBeyondDeletesSupported());
setupTables();
try {
upsertDataThenDeleteAndScrutinize(dataTableName, dataTableFullName, testClock);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java
index 98d9a4d..edfdca6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForNonTxGlobalIndexIT.java
@@ -43,8 +43,7 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.mapreduce.Counters;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.IndexRebuildRegionScanner;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.mapreduce.index.IndexTool;
@@ -169,7 +168,7 @@
serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
serverProps.put(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS, Long.toString(8));
- serverProps.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ serverProps.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Long.toString(MAX_LOOKBACK_AGE));
serverProps.put(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
Long.toString(Long.MAX_VALUE));
@@ -282,7 +281,7 @@
indexTableName, dataTableFullName));
IndexTool indexTool = IndexToolIT.runIndexTool(useSnapshot, schemaName, dataTableName, indexTableName, null, 0,
IndexTool.IndexVerifyType.ONLY);
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
Cell cell =
IndexToolIT.getErrorMessageFromIndexToolOutputTable(conn, dataTableFullName,
indexTableFullName);
@@ -427,7 +426,7 @@
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_VALID_INDEX_ROW_COUNT).getValue());
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT).getValue());
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
assertEquals(NROWS, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
} else {
@@ -600,7 +599,7 @@
IndexRebuildRegionScanner.setIgnoreIndexRebuildForTesting(true);
conn.createStatement().execute(String.format(
"CREATE INDEX %s ON %s (NAME) INCLUDE (ZIP) ASYNC " + this.indexDDLOptions, indexTableName, viewFullName));
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
// Run the index MR job and verify that the index table rebuild fails
IndexToolIT.runIndexTool(useSnapshot, schemaName, viewName, indexTableName,
null, -1, IndexTool.IndexVerifyType.AFTER);
@@ -624,7 +623,7 @@
@Test
public void testIndexToolFailedMapperNotRecordToResultTable() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported() && mutable && singleCell);
+ Assume.assumeTrue(mutable && singleCell);
Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
String schemaName = generateUniqueName();
@@ -684,7 +683,7 @@
// called PHOENIX_INDEX_TOOL
IndexToolIT.runIndexTool(useSnapshot, schemaName, dataTableName, indexTableName,
null, 0, IndexTool.IndexVerifyType.ONLY);
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
Cell cell = IndexToolIT.getErrorMessageFromIndexToolOutputTable(conn, dataTableFullName, indexTableFullName);
try {
String expectedErrorMsg = IndexRebuildRegionScanner.ERROR_MESSAGE_MISSING_INDEX_ROW;
@@ -711,7 +710,6 @@
@Test
public void testIndexToolForIncrementalRebuild() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
String schemaName = generateUniqueName();
String dataTableName = generateUniqueName();
String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
@@ -778,7 +776,6 @@
@Test
public void testIndexToolForIncrementalVerify() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
ManualEnvironmentEdge customEdge = new ManualEnvironmentEdge();
String schemaName = generateUniqueName();
String dataTableName = generateUniqueName();
@@ -908,7 +905,6 @@
@Test
public void testIndexToolForIncrementalVerify_viewIndex() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
ManualEnvironmentEdge customeEdge = new ManualEnvironmentEdge();
String schemaName = generateUniqueName();
String dataTableName = generateUniqueName();
@@ -1078,7 +1074,7 @@
truncateIndexAndIndexToolTables(indexTableFullName);
boolean MaxLookbackEnabled =
- CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration());
+ BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration());
//now check that disabling logging AFTER leaves only the BEFORE logs on a BOTH run
assertDisableLogging(conn, MaxLookbackEnabled ? 2 : 0, IndexTool.IndexVerifyType.BOTH,
IndexTool.IndexDisableLoggingType.AFTER,
@@ -1110,7 +1106,6 @@
@Test
public void testEnableOutputLoggingForMaxLookback() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isMaxLookbackTimeSupported());
//by default we don't log invalid or missing rows past max lookback age to the
// PHOENIX_INDEX_TOOL table. Verify that we can flip that logging on from the client-side
// using a system property (such as from the command line) and have it log rows on the
@@ -1217,7 +1212,7 @@
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_VALID_INDEX_ROW_COUNT).getValue());
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_EXPIRED_INDEX_ROW_COUNT).getValue());
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
assertEquals(NROWS, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
assertEquals(0, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
} else {
@@ -1276,7 +1271,6 @@
@Test
public void testIncrementalRebuildWithPageSize() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
String schemaName = generateUniqueName();
String dataTableName = generateUniqueName();
String fullDataTableName = SchemaUtil.getTableName(schemaName, dataTableName);
@@ -1332,7 +1326,6 @@
@Test
public void testUpdatablePKFilterViewIndexRebuild() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
if (!mutable) {
return;
}
@@ -1411,7 +1404,6 @@
@Test
public void testUpdatableNonPkFilterViewIndexRebuild() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
if (!mutable) {
return;
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolTimeRangeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolTimeRangeIT.java
index a2f9e03..599b050 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolTimeRangeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolTimeRangeIT.java
@@ -24,7 +24,6 @@
import java.sql.SQLException;
import java.util.Map;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.mapreduce.index.IndexTool;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
@@ -35,7 +34,6 @@
import org.apache.phoenix.util.SchemaUtil;
import org.junit.AfterClass;
import org.junit.Assert;
-import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -136,7 +134,6 @@
@Test
public void testValidTimeRange() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
String [] args = {"--delete-all-and-rebuild",
"--start-time", myClock.getRelativeTimeAsString(1),
"--end-time", myClock.getRelativeTimeAsString(9)};
@@ -147,7 +144,6 @@
@Test
public void testValidTimeRange_startTimeInBetween() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
String [] args = {"--delete-all-and-rebuild",
"--start-time", myClock.getRelativeTimeAsString(6),
"--end-time", myClock.getRelativeTimeAsString(9)};
@@ -158,7 +154,6 @@
@Test
public void testValidTimeRange_endTimeInBetween() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
String [] args = {"--delete-all-and-rebuild",
"--start-time", myClock.getRelativeTimeAsString(1),
"--end-time", myClock.getRelativeTimeAsString(6)};
@@ -177,7 +172,6 @@
@Test
public void testValidTimeRange_onlyStartTimePassed() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
//starttime passed of last upsert
String [] args = {"--delete-all-and-rebuild",
"--start-time", myClock.getRelativeTimeAsString(8)};
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexVerificationOldDesignIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexVerificationOldDesignIT.java
index 86095c5..2943be1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexVerificationOldDesignIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexVerificationOldDesignIT.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.end2end;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.phoenix.mapreduce.index.IndexTool;
import org.apache.phoenix.query.BaseTest;
@@ -60,7 +60,7 @@
serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
serverProps.put(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS, Long.toString(8));
- serverProps.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
+ serverProps.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
clientProps.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
clientProps.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
@@ -152,7 +152,7 @@
null, 0, IndexTool.IndexVerifyType.ONLY);
assertEquals(1, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_INVALID_INDEX_ROW_COUNT).getValue());
assertEquals(4, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_VALID_INDEX_ROW_COUNT).getValue());
- if (CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
+ if (BaseScannerRegionObserver.isMaxLookbackTimeEnabled(getUtility().getConfiguration())) {
assertEquals(1, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_MISSING_INDEX_ROW_COUNT).getValue());
} else {
assertEquals(1, indexTool.getJob().getCounters().findCounter(BEFORE_REBUILD_BEYOND_MAXLOOKBACK_MISSING_INDEX_ROW_COUNT).getValue());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java
index 80180de..5b820e9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LogicalTableNameBaseIT.java
@@ -58,7 +58,8 @@
import static org.apache.phoenix.query.QueryConstants.NAMESPACE_SEPARATOR;
import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@@ -72,7 +73,7 @@
static void initCluster(boolean isNamespaceMapped) throws Exception {
Map<String, String> props = Maps.newConcurrentMap();
props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60*1000)); // An hour
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60*1000)); // NOTE(review): key is in seconds, so 60*60*1000 is ~1000 hours, not an hour — confirm whether 60*60 was intended
if (isNamespaceMapped) {
props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
index 56af50d..81d3c5f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MaxLookbackIT.java
@@ -22,8 +22,7 @@
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
@@ -36,7 +35,6 @@
import org.apache.phoenix.util.TestUtil;
import org.junit.After;
import org.junit.Assert;
-import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -68,23 +66,17 @@
private StringBuilder optionBuilder;
ManualEnvironmentEdge injectEdge;
private int ttl;
- //max lookback isn't supported in HBase 2.1 and 2.2 because of missing coprocessor
- // interfaces; see HBASE-24321
- private final static boolean isMaxLookbackSupported =
- HbaseCompatCapabilities.isMaxLookbackTimeSupported();
@BeforeClass
public static synchronized void doSetup() throws Exception {
- Assume.assumeTrue(isMaxLookbackSupported);
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
props.put(QueryServices.GLOBAL_INDEX_ROW_AGE_THRESHOLD_TO_DELETE_MS_ATTRIB, Long.toString(0));
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(MAX_LOOKBACK_AGE));
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(MAX_LOOKBACK_AGE));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@Before
public void beforeTest(){
- Assume.assumeTrue(isMaxLookbackSupported);
EnvironmentEdgeManager.reset();
optionBuilder = new StringBuilder();
this.tableDDLOptions = optionBuilder.toString();
@@ -96,16 +88,13 @@
@After
public synchronized void afterTest() throws Exception {
boolean refCountLeaked = isAnyStoreRefCountLeaked();
- if (!isMaxLookbackSupported) {
- return;
- }
+
EnvironmentEdgeManager.reset();
assertFalse("refCount leaked", refCountLeaked);
}
@Test
public void testTooLowSCNWithMaxLookbackAge() throws Exception {
- Assume.assumeTrue(isMaxLookbackSupported);
String dataTableName = generateUniqueName();
createTable(dataTableName);
injectEdge.setValue(System.currentTimeMillis());
@@ -131,7 +120,6 @@
@Test(timeout=120000L)
public void testRecentlyDeletedRowsNotCompactedAway() throws Exception {
- Assume.assumeTrue(isMaxLookbackSupported);
try (Connection conn = DriverManager.getConnection(getUrl())) {
String dataTableName = generateUniqueName();
String indexName = generateUniqueName();
@@ -199,7 +187,6 @@
@Test(timeout=60000L)
public void testTTLAndMaxLookbackAge() throws Exception {
- Assume.assumeTrue(isMaxLookbackSupported);
ttl = 20;
optionBuilder.append("TTL=" + ttl);
tableDDLOptions = optionBuilder.toString();
@@ -272,7 +259,6 @@
@Test(timeout=60000)
public void testRecentMaxVersionsNotCompactedAway() throws Exception {
- Assume.assumeTrue(isMaxLookbackSupported);
int versions = 2;
optionBuilder.append("VERSIONS=" + versions);
tableDDLOptions = optionBuilder.toString();
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsDisabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsDisabledIT.java
index 2ce522d..782faf8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsDisabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelStatsDisabledIT.java
@@ -18,7 +18,7 @@
package org.apache.phoenix.end2end;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.commons.lang3.StringUtils;
import org.apache.phoenix.query.BaseTest;
@@ -28,7 +28,6 @@
import org.apache.phoenix.util.ReadOnlyProps;
import org.junit.AfterClass;
import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -58,7 +57,7 @@
@BeforeClass
public static synchronized void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60*60)); // An hour
props.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(false));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
index 4b64e1b..1110cd1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -29,11 +29,11 @@
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.PermissionStorage;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-import org.apache.phoenix.compat.hbase.CompatUtil;
import org.apache.phoenix.util.SchemaUtil;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -91,7 +91,7 @@
String tableZNode = ZNodePaths.joinZNode(aclZNode, "@" + schema);
byte[] data = ZKUtil.getData(zkw, tableZNode);
ListMultimap<String, ? extends Permission> userPermissions =
- CompatUtil.readPermissions(data, conf);
+ PermissionStorage.readPermissions(data, conf);
assertTrue("User permissions not found in cache:",
userPermissions.containsKey(regularUser1.getName()));
List<? extends Permission> tablePermissions =
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SCNIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SCNIT.java
index 77d2284..d0a83d1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SCNIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SCNIT.java
@@ -28,11 +28,8 @@
import java.util.Properties;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.SchemaUtil;
-import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -41,10 +38,6 @@
@Test
public void testReadBeforeDelete() throws Exception {
- //we don't support reading earlier than a delete in HBase 2.0-2.2, only in 1.4+ and 2.3+
- if (!HbaseCompatCapabilities.isLookbackBeyondDeletesSupported()){
- return;
- }
String schemaName = generateUniqueName();
String tableName = generateUniqueName();
String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SchemaRegistryFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SchemaRegistryFailureIT.java
index b5a303f..6ab7bd1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SchemaRegistryFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SchemaRegistryFailureIT.java
@@ -39,7 +39,7 @@
import java.sql.SQLException;
import java.util.Map;
-import static org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY;
+import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY;
@Category(NeedsOwnMiniClusterTest.class)
public class SchemaRegistryFailureIT extends ParallelStatsDisabledIT{
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
index 7d86eba..7145d68 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
@@ -41,7 +41,7 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.expression.KeyValueColumnExpression;
import org.apache.phoenix.expression.SingleCellColumnExpression;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -91,7 +91,7 @@
@BeforeClass
public static synchronized void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(0));
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(0));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
index e862b26..76af213 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablesCreationOnConnectionIT.java
@@ -57,6 +57,7 @@
import org.apache.phoenix.jdbc.PhoenixDriver;
import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
import org.apache.phoenix.jdbc.PhoenixTestDriver;
+import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.ConnectionQueryServicesImpl;
import org.apache.phoenix.query.QueryConstants;
@@ -186,8 +187,7 @@
testUtil.getHBaseCluster().getMaster() != null;
boolean refCountLeaked = false;
if (isMasterAvailable) {
- refCountLeaked = CompatUtil.isAnyStoreRefCountLeaked(
- testUtil.getAdmin());
+ refCountLeaked = BaseTest.isAnyStoreRefCountLeaked(testUtil.getAdmin());
}
testUtil.shutdownMiniCluster();
testUtil = null;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 2f07e81..0febfe6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -48,9 +48,8 @@
import java.util.Map;
import java.util.Properties;
-import org.apache.phoenix.compat.hbase.coprocessor
- .CompatBaseScannerRegionObserver;
import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -84,7 +83,7 @@
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
// An hour - inherited from ParallelStatsDisabledIT
props.put(
- CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Integer.toString(60 * 60));
// Postpone scans of SYSTEM.TASK indefinitely so as to prevent
// any addition to GLOBAL_OPEN_PHOENIX_CONNECTIONS
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
index 7a32ad3..ebb12db 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/WALAnnotationIT.java
@@ -29,8 +29,6 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.hbase.index.IndexRegionObserver;
@@ -102,7 +100,6 @@
@Test
public void testSimpleUpsertAndDelete() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
SchemaBuilder builder = new SchemaBuilder(getUrl());
boolean createGlobalIndex = false;
String externalSchemaId = upsertAndDeleteHelper(builder, createGlobalIndex);
@@ -111,7 +108,6 @@
@Test
public void testNoAnnotationsIfChangeDetectionDisabled() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
try (Connection conn = DriverManager.getConnection(getUrl())) {
conn.setAutoCommit(true);
SchemaBuilder builder = new SchemaBuilder(getUrl());
@@ -153,7 +149,6 @@
@Test
public void testCantSetChangeDetectionOnIndex() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
try (Connection conn = DriverManager.getConnection(getUrl())) {
SchemaBuilder builder = new SchemaBuilder(getUrl());
builder.withTableDefaults().build();
@@ -174,7 +169,6 @@
@Test
public void testUpsertAndDeleteWithGlobalIndex() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
SchemaBuilder builder = new SchemaBuilder(getUrl());
boolean createGlobalIndex = true;
String externalSchemaId = upsertAndDeleteHelper(builder, createGlobalIndex);
@@ -228,7 +222,6 @@
@Test
public void testUpsertSelectClientSide() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
try (Connection conn = getConnection()) {
SchemaBuilder baseBuilder = new SchemaBuilder(getUrl());
SchemaBuilder targetBuilder = new SchemaBuilder(getUrl());
@@ -261,7 +254,6 @@
@Test
public void testUpsertSelectServerSide() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
Assume.assumeFalse(isImmutable); //only mutable tables can be processed server-side
SchemaBuilder targetBuilder = new SchemaBuilder(getUrl());
try (Connection conn = getConnection()) {
@@ -284,7 +276,6 @@
@Test
public void testGroupedUpsertSelect() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
// because we're inserting to a different table than we're selecting from, this should be
// processed client-side
SchemaBuilder baseBuilder = new SchemaBuilder(getUrl());
@@ -312,7 +303,6 @@
}
private void testRangeDeleteHelper(boolean isClientSide) throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
SchemaBuilder builder = new SchemaBuilder(getUrl());
builder.withTableOptions(getTableOptions()).build();
try (Connection conn = getConnection()) {
@@ -344,7 +334,6 @@
@Test
public void testGlobalViewUpsert() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
SchemaBuilder builder = new SchemaBuilder(getUrl());
try (Connection conn = getConnection()) {
createGlobalViewHelper(builder, conn);
@@ -379,7 +368,6 @@
@Test
public void testTenantViewUpsert() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
Assume.assumeTrue(isMultiTenant);
boolean createIndex = false;
tenantViewHelper(createIndex);
@@ -433,7 +421,6 @@
@Test
public void testTenantViewUpsertWithIndex() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
Assume.assumeTrue(isMultiTenant);
tenantViewHelper(true);
}
@@ -441,7 +428,6 @@
@Test
public void testOnDuplicateUpsertWithIndex() throws Exception {
Assume.assumeFalse(this.isImmutable); // on duplicate is not supported for immutable tables
- Assume.assumeTrue(HbaseCompatCapabilities.hasPreWALAppend());
SchemaBuilder builder = new SchemaBuilder(getUrl());
try (Connection conn = getConnection()) {
SchemaBuilder.TableOptions tableOptions = getTableOptions();
@@ -540,7 +526,7 @@
RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
TableName tableName = logKey.getTableName();
Map<String, byte[]> annotationMap =
- CompatIndexRegionObserver.getAttributeValuesFromWALKey(logKey);
+ IndexRegionObserver.getAttributeValuesFromWALKey(logKey);
if (annotationMap.size() > 0) {
if (!walAnnotations.containsKey(tableName)) {
walAnnotations.put(tableName, new ArrayList<Map<String, byte[]>>());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTwoPhaseCreateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTwoPhaseCreateIT.java
index 4b9f246..d91af13 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTwoPhaseCreateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexTwoPhaseCreateIT.java
@@ -17,7 +17,7 @@
*/
package org.apache.phoenix.end2end.index;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.end2end.IndexToolIT;
import org.apache.phoenix.end2end.transform.TransformToolIT;
import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -58,7 +58,7 @@
@BeforeClass
public static synchronized void doSetup() throws Exception {
Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
- props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60 * 60)); // An hour
+ props.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(60 * 60)); // An hour
props.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(false));
props.put(QueryServices.INDEX_CREATE_DEFAULT_STATE, PIndexState.CREATE_DISABLE.toString());
props.put(QueryServices.DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, "ONE_CELL_PER_COLUMN");
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/transform/TransformMonitorExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/transform/TransformMonitorExtendedIT.java
index f2fe434..6163a76 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/transform/TransformMonitorExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/transform/TransformMonitorExtendedIT.java
@@ -17,11 +17,11 @@
*/
package org.apache.phoenix.end2end.transform;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.TaskRegionObserver;
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.end2end.index.SingleCellIndexIT;
@@ -68,7 +68,7 @@
@BeforeClass
public static synchronized void doSetup() throws Exception {
Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
- serverProps.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ serverProps.put(BaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
Integer.toString(60*60)); // An hour
serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index 7630eb2..b2a7503 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -168,6 +168,30 @@
public int getActiveScanRpcHandlerCount() {
return delegate.getActiveScanRpcHandlerCount();
}
-
-
+
+ @Override
+ public int getMetaPriorityQueueLength() {
+ return this.delegate.getMetaPriorityQueueLength();
+ }
+
+ @Override
+ public int getActiveGeneralRpcHandlerCount() {
+ return this.delegate.getActiveGeneralRpcHandlerCount();
+ }
+
+ @Override
+ public int getActivePriorityRpcHandlerCount() {
+ return this.delegate.getActivePriorityRpcHandlerCount();
+ }
+
+ @Override
+ public int getActiveMetaPriorityRpcHandlerCount() {
+ return this.delegate.getActiveMetaPriorityRpcHandlerCount();
+ }
+
+ @Override
+ public int getActiveReplicationRpcHandlerCount() {
+ return this.delegate.getActiveReplicationRpcHandlerCount();
+ }
+
}
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 80ea6be..9f238ea 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -30,12 +30,15 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.io.hfile.ReaderContext;
+import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compat.hbase.CompatStoreFileReader;
import org.apache.phoenix.index.IndexMaintainer;
/**
@@ -53,7 +56,7 @@
* This file is not splitable. Calls to #midkey() return null.
*/
-public class IndexHalfStoreFileReader extends CompatStoreFileReader {
+public class IndexHalfStoreFileReader extends StoreFileReader {
private final boolean top;
// This is the key we split around. Its the first possible entry on a row:
// i.e. empty column and a timestamp of LATEST_TIMESTAMP.
@@ -89,7 +92,12 @@
final byte[][] viewConstants, final RegionInfo regionInfo,
byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile,
AtomicInteger refCount, RegionInfo currentRegion) throws IOException {
- super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, conf);
+ super(new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile,
+ ReaderType.STREAM),
+ new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs),
+ primaryReplicaStoreFile, ReaderType.STREAM), conf),
+ cacheConf, refCount, conf);
+ getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader());
this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
// Is it top or bottom half?
this.top = Reference.isTopFileRegion(r.getFileRegion());
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
index 67edb6d..dc1e9ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/QueryCompiler.java
@@ -31,13 +31,12 @@
import org.apache.phoenix.thirdparty.com.google.common.base.Optional;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.JoinCompiler.JoinSpec;
import org.apache.phoenix.compile.JoinCompiler.JoinTable;
import org.apache.phoenix.compile.JoinCompiler.Table;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.exception.SQLExceptionInfo;
import org.apache.phoenix.execute.AggregatePlan;
@@ -181,9 +180,6 @@
}
private void verifySCN() throws SQLException {
- if (!HbaseCompatCapabilities.isMaxLookbackTimeSupported()) {
- return;
- }
PhoenixConnection conn = statement.getConnection();
if (conn.isRunningUpgrade()) {
// PHOENIX-6179 : if upgrade is going on, we don't need to
@@ -195,7 +191,7 @@
return;
}
long maxLookBackAgeInMillis =
- CompatBaseScannerRegionObserver.getMaxLookbackInMillis(conn.getQueryServices().
+ BaseScannerRegionObserver.getMaxLookbackInMillis(conn.getQueryServices().
getConfiguration());
long now = EnvironmentEdgeManager.currentTimeMillis();
if (maxLookBackAgeInMillis > 0 && now - maxLookBackAgeInMillis > scn){
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 7335369..06eb099 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -20,22 +20,34 @@
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeepDeletedCells;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanOptions;
+import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.ScannerContextUtil;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.filter.PagedFilter;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -49,7 +61,7 @@
import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForFilter;
-abstract public class BaseScannerRegionObserver extends CompatBaseScannerRegionObserver {
+abstract public class BaseScannerRegionObserver implements RegionObserver {
public static final String AGGREGATORS = "_Aggs";
public static final String UNORDERED_GROUP_BY_EXPRESSIONS = "_UnorderedGroupByExpressions";
@@ -142,7 +154,11 @@
// In case of Index Write failure, we need to determine that Index mutation
// is part of normal client write or Index Rebuilder. # PHOENIX-5080
public final static byte[] REPLAY_INDEX_REBUILD_WRITES = PUnsignedTinyint.INSTANCE.toBytes(3);
-
+
+ public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY =
+ "phoenix.max.lookback.age.seconds";
+ public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0;
+
public enum ReplayWrite {
TABLE_AND_INDEX,
INDEX_ONLY,
@@ -403,14 +419,165 @@
/* We want to override the store scanner so that we can read "past" a delete
marker on an SCN / lookback query to see the underlying edit. This was possible
in HBase 1.x, but not possible after the interface changes in HBase 2.0. HBASE-24321 in
- HBase 2.3 gave us this ability back, but we need to use it through a compatibility shim
- so we can compile against 2.1 and 2.2. When 2.3 is the minimum supported HBase
- version, the shim can be retired and the logic moved into the real coproc.
-
+ HBase 2.3 gave us this ability back.
We also need to override the flush compaction coproc hooks in order to implement max lookback
age to keep versions from being purged.
+ */
- Because the required APIs aren't present in HBase 2.1 and 2.2, we override in the 2.3
- version of CompatBaseScannerRegionObserver and no-op in the other versions. */
+ @Override
+ public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+ ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
+ CompactionRequest request) throws IOException {
+ Configuration conf = c.getEnvironment().getConfiguration();
+ if (isMaxLookbackTimeEnabled(conf)) {
+ setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
+ }
+ }
+
+ @Override
+ public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+ ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
+ Configuration conf = c.getEnvironment().getConfiguration();
+ if (isMaxLookbackTimeEnabled(conf)) {
+ setScanOptionsForFlushesAndCompactions(conf, options, store, ScanType.COMPACT_RETAIN_DELETES);
+ }
+ }
+
+ @Override
+ public void preMemStoreCompactionCompactScannerOpen(
+ ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
+ throws IOException {
+ Configuration conf = c.getEnvironment().getConfiguration();
+ if (isMaxLookbackTimeEnabled(conf)) {
+ MemoryCompactionPolicy inMemPolicy =
+ store.getColumnFamilyDescriptor().getInMemoryCompaction();
+ ScanType scanType;
+ //the eager and adaptive in-memory compaction policies can purge versions; the others
+ // can't. (Eager always does; adaptive sometimes does)
+ if (inMemPolicy.equals(MemoryCompactionPolicy.EAGER) ||
+ inMemPolicy.equals(MemoryCompactionPolicy.ADAPTIVE)) {
+ scanType = ScanType.COMPACT_DROP_DELETES;
+ } else {
+ scanType = ScanType.COMPACT_RETAIN_DELETES;
+ }
+ setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
+ }
+ }
+
+ @Override
+ public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
+ ScanOptions options) throws IOException {
+
+ if (!storeFileScanDoesntNeedAlteration(options)) {
+ //PHOENIX-4277 -- When doing a point-in-time (SCN) Scan, HBase by default will hide
+ // mutations that happen before a delete marker. This overrides that behavior.
+ options.setMinVersions(options.getMinVersions());
+ KeepDeletedCells keepDeletedCells = KeepDeletedCells.TRUE;
+ if (store.getColumnFamilyDescriptor().getTimeToLive() != HConstants.FOREVER) {
+ keepDeletedCells = KeepDeletedCells.TTL;
+ }
+ options.setKeepDeletedCells(keepDeletedCells);
+ }
+ }
+
+ private boolean storeFileScanDoesntNeedAlteration(ScanOptions options) {
+ Scan scan = options.getScan();
+ boolean isRaw = scan.isRaw();
+ //true if keep deleted cells is either TRUE or TTL
+ boolean keepDeletedCells = options.getKeepDeletedCells().equals(KeepDeletedCells.TRUE) ||
+ options.getKeepDeletedCells().equals(KeepDeletedCells.TTL);
+ boolean timeRangeIsLatest = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP;
+ boolean timestampIsTransactional =
+ isTransactionalTimestamp(scan.getTimeRange().getMax());
+ return isRaw
+ || keepDeletedCells
+ || timeRangeIsLatest
+ || timestampIsTransactional;
+ }
+
+ private boolean isTransactionalTimestamp(long ts) {
+ //have to use the HBase edge manager because the Phoenix one is in phoenix-core
+ return ts > (long) (EnvironmentEdgeManager.currentTime() * 1.1);
+ }
+
+    /*
+     * If KeepDeletedCells is FALSE or TTL,
+     * let delete markers age out once the max lookback window has passed.
+     */
+ public KeepDeletedCells getKeepDeletedCells(ScanOptions options, ScanType scanType) {
+        //if we're doing a minor compaction or flush, always set keep deleted cells
+        //to true. Otherwise, if keep deleted cells is false or TTL, use KeepDeletedCells TTL,
+        //where the value of the ttl might be overridden to the max lookback age elsewhere
+ return (options.getKeepDeletedCells() == KeepDeletedCells.TRUE
+ || scanType.equals(ScanType.COMPACT_RETAIN_DELETES)) ?
+ KeepDeletedCells.TRUE : KeepDeletedCells.TTL;
+ }
+
+    /*
+     * If the user set a TTL we should leave MIN_VERSIONS at the default (0 in most cases).
+     * Otherwise the data (1st version) will not be removed after the TTL. If no TTL, we want
+     * Math.max(maxVersions, minVersions, 1)
+     */
+ public int getMinVersions(ScanOptions options, ColumnFamilyDescriptor cfDescriptor) {
+ return cfDescriptor.getTimeToLive() != HConstants.FOREVER ? options.getMinVersions()
+ : Math.max(Math.max(options.getMinVersions(),
+ cfDescriptor.getMaxVersions()),1);
+ }
+
+ /**
+ *
+ * @param conf HBase Configuration
+ * @param columnDescriptor ColumnFamilyDescriptor for the store being compacted
+ * @param options ScanOptions of overrides to the compaction scan
+ * @return Time to live in milliseconds, based on both HBase TTL and Phoenix max lookback age
+ */
+ public long getTimeToLiveForCompactions(Configuration conf,
+ ColumnFamilyDescriptor columnDescriptor,
+ ScanOptions options) {
+ long ttlConfigured = columnDescriptor.getTimeToLive();
+ long ttlInMillis = ttlConfigured * 1000;
+ long maxLookbackTtl = getMaxLookbackInMillis(conf);
+ if (isMaxLookbackTimeEnabled(maxLookbackTtl)) {
+ if (ttlConfigured == HConstants.FOREVER
+ && columnDescriptor.getKeepDeletedCells() != KeepDeletedCells.TRUE) {
+                // If the user kept the default TTL (FOREVER) and set keep-deleted-cells to FALSE
+                // or TTL, change the TTL to the max lookback age so that unwanted delete
+                // markers can be removed
+ ttlInMillis = maxLookbackTtl;
+ } else {
+ //if there is a TTL, use TTL instead of max lookback age.
+ // Max lookback age should be more recent or equal to TTL
+ ttlInMillis = Math.max(ttlInMillis, maxLookbackTtl);
+ }
+ }
+
+ return ttlInMillis;
+ }
+
+ public void setScanOptionsForFlushesAndCompactions(Configuration conf,
+ ScanOptions options,
+ final Store store,
+ ScanType type) {
+ ColumnFamilyDescriptor cfDescriptor = store.getColumnFamilyDescriptor();
+ options.setTTL(getTimeToLiveForCompactions(conf, cfDescriptor,
+ options));
+ options.setKeepDeletedCells(getKeepDeletedCells(options, type));
+ options.setMaxVersions(Integer.MAX_VALUE);
+ options.setMinVersions(getMinVersions(options, cfDescriptor));
+ }
+
+ public static long getMaxLookbackInMillis(Configuration conf){
+ //config param is in seconds, switch to millis
+ return conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000;
+ }
+
+ public static boolean isMaxLookbackTimeEnabled(Configuration conf){
+ return isMaxLookbackTimeEnabled(conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+ DEFAULT_PHOENIX_MAX_LOOKBACK_AGE));
+ }
+
+ public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){
+ return maxLookbackTime > 0L;
+ }
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
index b05854b..0686e7a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
@@ -39,8 +39,6 @@
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
import org.apache.phoenix.filter.PagedFilter;
import org.apache.phoenix.hbase.index.ValueGetter;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -163,7 +161,6 @@
protected Map<byte[], NavigableSet<byte[]>> familyMap;
protected IndexTool.IndexVerifyType verifyType = IndexTool.IndexVerifyType.NONE;
protected boolean verify = false;
- protected boolean isRawFilterSupported;
public GlobalIndexRegionScanner(final RegionScanner innerScanner,
final Region region,
@@ -219,7 +216,7 @@
}
// Create the following objects only for rebuilds by IndexTool
hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env);
- maxLookBackInMills = CompatBaseScannerRegionObserver.getMaxLookbackInMillis(config);
+ maxLookBackInMills = BaseScannerRegionObserver.getMaxLookbackInMillis(config);
rowCountPerTask = config.getInt(INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY,
DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK);
@@ -257,7 +254,6 @@
new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory);
nextStartKey = null;
minTimestamp = scan.getTimeRange().getMin();
- isRawFilterSupported = HbaseCompatCapabilities.isRawFilterSupported();
}
}
@@ -340,7 +336,7 @@
protected static boolean isTimestampBeyondMaxLookBack(long maxLookBackInMills,
long currentTime, long tsToCheck) {
- if (!CompatBaseScannerRegionObserver.isMaxLookbackTimeEnabled(maxLookBackInMills)) {
+ if (!BaseScannerRegionObserver.isMaxLookbackTimeEnabled(maxLookBackInMills)) {
// By definition, if the max lookback feature is not enabled, then delete markers and rows
// version can be removed by compaction any time, and thus there is no window in which these mutations are
// preserved, i.e., the max lookback window size is zero. This means all the mutations are effectively
@@ -1142,10 +1138,8 @@
Scan indexScan = new Scan();
indexScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax());
scanRanges.initializeScan(indexScan);
- if (isRawFilterSupported) {
- SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
- indexScan.setFilter(new SkipScanFilter(skipScanFilter, true));
- }
+ SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
+ indexScan.setFilter(new SkipScanFilter(skipScanFilter, true));
indexScan.setRaw(true);
indexScan.readAllVersions();
indexScan.setCacheBlocks(false);
@@ -1443,11 +1437,6 @@
if (filter instanceof PagedFilter) {
PagedFilter pageFilter = (PagedFilter) filter;
Filter delegateFilter = pageFilter.getDelegateFilter();
- if (!HbaseCompatCapabilities.isRawFilterSupported() &&
- (delegateFilter == null || delegateFilter instanceof FirstKeyOnlyFilter)) {
- scan.setFilter(null);
- return true;
- }
if (delegateFilter instanceof FirstKeyOnlyFilter) {
pageFilter.setDelegateFilter(null);
} else if (delegateFilter != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java
index f99e153..04e0070 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRebuildRegionScanner.java
@@ -158,9 +158,6 @@
Scan indexScan = prepareIndexScan(expectedIndexMutationMap);
try (ResultScanner resultScanner = indexHTable.getScanner(indexScan)) {
for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) {
- if (!isRawFilterSupported && !expectedIndexMutationMap.containsKey(result.getRow())) {
- continue;
- }
ungroupedAggregateRegionObserver.checkForRegionClosingOrSplitting();
List<Mutation> mutationList = prepareActualIndexMutations(result);
actualIndexMutationMap.put(result.getRow(), mutationList);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
index ee41dc4..53248eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/IndexRepairRegionScanner.java
@@ -149,10 +149,8 @@
Scan dataScan = new Scan();
dataScan.setTimeRange(scan.getTimeRange().getMin(), scan.getTimeRange().getMax());
scanRanges.initializeScan(dataScan);
- if(isRawFilterSupported) {
- SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
- dataScan.setFilter(new SkipScanFilter(skipScanFilter, true));
- }
+ SkipScanFilter skipScanFilter = scanRanges.getSkipScanFilter();
+ dataScan.setFilter(new SkipScanFilter(skipScanFilter, true));
dataScan.setRaw(true);
dataScan.setMaxVersions();
dataScan.setCacheBlocks(false);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index f0217c4..e25f214 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -58,7 +58,6 @@
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.phoenix.compat.hbase.CompatPermissionUtil;
import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
@@ -80,10 +79,6 @@
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
-import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.authorizeUserTable;
-import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.getPermissionFromUP;
-import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.getUserFromUP;
-
public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
private PhoenixMetaDataControllerEnvironment env;
@@ -154,13 +149,7 @@
"Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment");
}
- //2.3+ doesn't need to access ZK object.
- ZKWatcher zk = null;
- RegionCoprocessorEnvironment regionEnv = this.env.getRegionCoprocessorEnvironment();
- if (regionEnv instanceof HasRegionServerServices) {
- zk = ((HasRegionServerServices) regionEnv).getRegionServerServices().getZooKeeper();
- }
- accessChecker = CompatPermissionUtil.newAccessChecker(env.getConfiguration(), zk);
+ accessChecker = new AccessChecker(env.getConfiguration());
// set the user-provider.
this.userProvider = UserProvider.instantiate(env.getConfiguration());
// init superusers and add the server principal (if using security)
@@ -169,13 +158,6 @@
}
@Override
- public void stop(CoprocessorEnvironment env) throws IOException {
- if (accessChecker.getAuthManager() != null) {
- CompatPermissionUtil.stopAccessChecker(accessChecker);
- }
- }
-
- @Override
public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
@@ -222,7 +204,7 @@
if (permissionForUser != null) {
for (UserPermission userPermission : permissionForUser) {
for (Action action : Arrays.asList(requiredActions)) {
- if (!getPermissionFromUP(userPermission).implies(action)) {
+ if (!userPermission.getPermission().implies(action)) {
requireAccess.add(action);
}
}
@@ -230,7 +212,7 @@
if (!requireAccess.isEmpty()) {
for (UserPermission userPermission : permissionForUser) {
accessExists.addAll(Arrays.asList(
- getPermissionFromUP(userPermission).getActions()));
+ userPermission.getPermission().getActions()));
}
}
} else {
@@ -307,15 +289,15 @@
Set<Action> requireAccess = new HashSet<Action>();
Set<Action> accessExists = new HashSet<Action>();
List<UserPermission> permsToTable = getPermissionForUser(permissionsOnTheTable,
- getUserFromUP(userPermission));
+ userPermission.getUser());
for (Action action : requiredActionsOnTable) {
boolean haveAccess=false;
- if (getPermissionFromUP(userPermission).implies(action)) {
+ if (userPermission.getPermission().implies(action)) {
if (permsToTable == null) {
requireAccess.add(action);
} else {
for (UserPermission permToTable : permsToTable) {
- if (getPermissionFromUP(permToTable).implies(action)) {
+ if (permToTable.getPermission().implies(action)) {
haveAccess=true;
}
}
@@ -329,19 +311,19 @@
// Append access to already existing access for the user
for (UserPermission permToTable : permsToTable) {
accessExists.addAll(Arrays.asList(
- getPermissionFromUP(permToTable).getActions()));
+ permToTable.getPermission().getActions()));
}
}
if (!requireAccess.isEmpty()) {
- if (AuthUtil.isGroupPrincipal(getUserFromUP(userPermission))){
- AUDITLOG.warn("Users of GROUP:" + getUserFromUP(userPermission)
+ if (AuthUtil.isGroupPrincipal(userPermission.getUser())){
+ AUDITLOG.warn("Users of GROUP:" + userPermission.getUser()
+ " will not have following access " + requireAccess
+ " to the newly created index " + toTable
+ ", Automatic grant is not yet allowed on Groups");
continue;
}
handleRequireAccessOnDependentTable(request,
- getUserFromUP(userPermission), toTable,
+ userPermission.getUser(), toTable,
toTable.getNameAsString(), requireAccess, accessExists);
}
}
@@ -358,7 +340,7 @@
// permissions for same users
List<UserPermission> permissions = new ArrayList<>();
for (UserPermission p : perms) {
- if (getUserFromUP(p).equals(user)){
+ if (p.getUser().equals(user)){
permissions.add(p);
}
}
@@ -622,14 +604,14 @@
}
if (perms != null) {
if (hbaseAccessControllerEnabled
- && authorizeUserTable(accessChecker, user, table, action)) {
+ && accessChecker.getAuthManager().authorizeUserTable(user, table, action)) {
return true;
}
List<UserPermission> permissionsForUser =
getPermissionForUser(perms, user.getShortName());
if (permissionsForUser != null) {
for (UserPermission permissionForUser : permissionsForUser) {
- if (getPermissionFromUP(permissionForUser).implies(action)) { return true; }
+ if (permissionForUser.getPermission().implies(action)) { return true; }
}
}
String[] groupNames = user.getGroupNames();
@@ -638,7 +620,7 @@
List<UserPermission> groupPerms =
getPermissionForUser(perms, (AuthUtil.toGroupEntry(group)));
if (groupPerms != null) for (UserPermission permissionForUser : groupPerms) {
- if (getPermissionFromUP(permissionForUser).implies(action)) { return true; }
+ if (permissionForUser.getPermission().implies(action)) { return true; }
}
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java
index 59bd793..b228bdc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UncoveredLocalIndexRegionScanner.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.index.IndexMaintainer;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
index 88e46d1..350430e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
@@ -320,4 +321,9 @@
public long getOperationTimeout(TimeUnit unit) {
return delegate.getOperationTimeout(unit);
}
+
+ @Override
+ public RegionLocator getRegionLocator() throws IOException {
+ return delegate.getRegionLocator();
+ }
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 0a6b92a..22fab63 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -62,7 +62,6 @@
import org.apache.htrace.Span;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.compile.MutationPlan;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
@@ -761,10 +760,8 @@
}
private void annotateMutationsWithMetadata(PTable table, List<Mutation> rowMutations) {
- //only annotate if the change detection flag is on the table and HBase supports
- // preWALAppend coprocs server-side
- if (table == null || !table.isChangeDetectionEnabled()
- || !HbaseCompatCapabilities.hasPreWALAppend()) {
+ //only annotate if the change detection flag is on the table.
+ if (table == null || !table.isChangeDetectionEnabled()) {
return;
}
//annotate each mutation with enough metadata so that anyone interested can
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
index 2c40c98..cf4d1fa 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java
@@ -76,8 +76,6 @@
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment;
import org.apache.phoenix.coprocessor.GlobalIndexRegionScanner;
@@ -145,8 +143,7 @@
* Phoenix always does batch mutations.
* <p>
*/
-public class IndexRegionObserver extends CompatIndexRegionObserver implements RegionCoprocessor,
- RegionObserver {
+public class IndexRegionObserver implements RegionCoprocessor, RegionObserver {
private static final Logger LOG = LoggerFactory.getLogger(IndexRegionObserver.class);
private static final OperationStatus IGNORE = new OperationStatus(OperationStatusCode.SUCCESS);
@@ -1243,7 +1240,7 @@
@Override
public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> c, WALKey key,
WALEdit edit) {
- if (HbaseCompatCapabilities.hasPreWALAppend() && shouldWALAppend) {
+ if (shouldWALAppend) {
BatchMutateContext context = getBatchMutateContext(c);
WALAnnotationUtil.appendMutationAttributesToWALKey(key, context);
}
@@ -1640,4 +1637,16 @@
}
return Lists.newArrayList(latestColVals.values());
}
+
+ public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) {
+ key.addExtendedAttribute(attrKey, attrValue);
+ }
+
+ public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) {
+ return key.getExtendedAttribute(attrKey);
+ }
+
+ public static Map<String, byte[]> getAttributeValuesFromWALKey(WALKey key) {
+ return new HashMap<String, byte[]>(key.getExtendedAttributes());
+ }
}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java
similarity index 98%
rename from phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
rename to phoenix-core/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java
index c5485a5..4582f3b 100644
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/OffsetCell.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.phoenix.compat.hbase;
+package org.apache.phoenix.hbase.index;
import org.apache.hadoop.hbase.Cell;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 3a9071e..00cdcdc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -263,15 +263,14 @@
.withChecksumType(CompatUtil.getChecksumType(conf))
.withBytesPerCheckSum(CompatUtil.getBytesPerChecksum(conf))
.withBlockSize(blockSize)
- .withDataBlockEncoding(encoding);
- CompatUtil.withComparator(contextBuilder, CellComparatorImpl.COMPARATOR);
+ .withDataBlockEncoding(encoding)
+ .withCellComparator(CellComparatorImpl.COMPARATOR);
HFileContext hFileContext = contextBuilder.build();
StoreFileWriter.Builder storeFileWriterBuilder =
new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)
.withOutputDir(familydir).withBloomType(bloomType)
.withFileContext(hFileContext);
- CompatUtil.withComparator(storeFileWriterBuilder, CellComparatorImpl.COMPARATOR);
wl.writer = storeFileWriterBuilder.build();
// join and put it in the writers map .
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
index 6f3ca2c..a43f7dc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyMapper.java
@@ -43,7 +43,7 @@
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.mapreduce.PhoenixJobCounters;
@@ -167,7 +167,7 @@
LOGGER.info("Target table base query: " + targetTableQuery);
md5 = MessageDigest.getInstance("MD5");
ttl = getTableTtl();
- maxLookbackAgeMillis = CompatBaseScannerRegionObserver.getMaxLookbackInMillis(configuration);
+ maxLookbackAgeMillis = BaseScannerRegionObserver.getMaxLookbackInMillis(configuration);
} catch (SQLException | NoSuchAlgorithmException e) {
tryClosingResourceSilently(this.outputUpsertStmt);
tryClosingResourceSilently(this.connection);
@@ -315,7 +315,7 @@
}
protected boolean isRowOlderThanMaxLookback(Long sourceTS){
- if (maxLookbackAgeMillis == CompatBaseScannerRegionObserver.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000){
+ if (maxLookbackAgeMillis == BaseScannerRegionObserver.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000){
return false;
}
long now = EnvironmentEdgeManager.currentTimeMillis();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index 4200271..fce584f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -48,7 +48,7 @@
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.mapreduce.CsvBulkImportUtil;
import org.apache.phoenix.mapreduce.util.ConnectionUtil;
@@ -516,8 +516,8 @@
}
private void validateTimestamp(Configuration configuration, long ts) {
- long maxLookBackAge = CompatBaseScannerRegionObserver.getMaxLookbackInMillis(configuration);
- if (maxLookBackAge != CompatBaseScannerRegionObserver.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000L) {
+ long maxLookBackAge = BaseScannerRegionObserver.getMaxLookbackInMillis(configuration);
+ if (maxLookBackAge != BaseScannerRegionObserver.DEFAULT_PHOENIX_MAX_LOOKBACK_AGE * 1000L) {
long minTimestamp = EnvironmentEdgeManager.currentTimeMillis() - maxLookBackAge;
if (ts < minTimestamp){
throw new IllegalArgumentException("Index scrutiny can't look back past the configured" +
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index f5b41c7..1d730ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -75,7 +75,6 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.compile.PostIndexDDLCompiler;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.hbase.index.ValueGetter;
@@ -403,11 +402,6 @@
"DisableLoggingType: ["
+ cmdLine.getOptionValue(DISABLE_LOGGING_OPTION.getOpt()) + "]");
}
- if ((cmdLine.hasOption(START_TIME_OPTION.getOpt()) || cmdLine.hasOption(RETRY_VERIFY_OPTION.getOpt()))
- && !HbaseCompatCapabilities.isRawFilterSupported()) {
- throw new IllegalStateException("Can't do incremental index verification on this " +
- "version of HBase because raw skip scan filters are not supported.");
- }
return cmdLine;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
index cdd8349..7d4d7bd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionTable.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
@@ -372,4 +373,8 @@
throw new UnsupportedOperationException();
}
+ @Override
+ public RegionLocator getRegionLocator() throws IOException {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 7f2ebf5..c7e7d6b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -76,7 +76,6 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
-import org.apache.phoenix.compat.hbase.OffsetCell;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.compile.IndexStatementRewriter;
@@ -98,6 +97,7 @@
import org.apache.phoenix.expression.SingleCellColumnExpression;
import org.apache.phoenix.expression.visitor.RowKeyExpressionVisitor;
import org.apache.phoenix.hbase.index.AbstractValueGetter;
+import org.apache.phoenix.hbase.index.OffsetCell;
import org.apache.phoenix.hbase.index.ValueGetter;
import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
index f553c85..23dfc2a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixKeyValueUtil.java
@@ -178,7 +178,7 @@
long size = 0;
for (Entry<byte [], List<Cell>> entry : m.getFamilyCellMap().entrySet()) {
for (Cell c : entry.getValue()) {
- size += CompatUtil.getCellSerializedSize(c);
+ size += c.getSerializedSize();
}
}
return size;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java
index d5f4a1e..634b57b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/WALAnnotationUtil.java
@@ -18,7 +18,6 @@
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatIndexRegionObserver;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.hbase.index.IndexRegionObserver;
@@ -39,7 +38,7 @@
MutationState.MutationMetadataType.values()) {
String metadataTypeKey = metadataType.toString();
if (attrMap.containsKey(metadataTypeKey)) {
- CompatIndexRegionObserver.appendToWALKey(key, metadataTypeKey,
+ IndexRegionObserver.appendToWALKey(key, metadataTypeKey,
attrMap.get(metadataTypeKey));
}
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java
index 1ce12a6..7417901 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexToolTest.java
@@ -18,16 +18,13 @@
package org.apache.phoenix.index;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.end2end.IndexToolIT;
import org.apache.phoenix.mapreduce.index.IndexScrutinyTool;
import org.apache.phoenix.mapreduce.index.IndexTool;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.IndexScrutiny;
import org.junit.Assert;
-import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -40,7 +37,6 @@
import static org.apache.phoenix.mapreduce.index.IndexTool.INVALID_TIME_RANGE_EXCEPTION_MESSAGE;
import static org.apache.phoenix.mapreduce.index.IndexTool.RETRY_VERIFY_NOT_APPLICABLE;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assume.assumeTrue;
import static org.mockito.Mockito.when;
public class IndexToolTest extends BaseTest {
@@ -69,7 +65,6 @@
@Test
public void testParseOptions_timeRange_timeRangeNotNull() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = 10L;
Long endTime = 15L;
String [] args =
@@ -95,7 +90,6 @@
@Test
public void testParseOptions_timeRange_startTimeNotNull() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = 10L;
String [] args =
IndexToolIT.getArgValues(true, schema,
@@ -135,7 +129,6 @@
@Test
public void testParseOptions_timeRange_endTimeNullStartTimeInFuture() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = EnvironmentEdgeManager.currentTimeMillis() + 100000;
String [] args =
IndexToolIT.getArgValues(true, schema,
@@ -149,7 +142,6 @@
@Test(timeout = 10000 /* 10 secs */)
public void testParseOptions_timeRange_startTimeInFuture() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = EnvironmentEdgeManager.currentTimeMillis() + 100000;
Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 200000;
String [] args =
@@ -164,7 +156,6 @@
@Test(timeout = 10000 /* 10 secs */)
public void testParseOptions_timeRange_endTimeInFuture() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = EnvironmentEdgeManager.currentTimeMillis();
Long endTime = EnvironmentEdgeManager.currentTimeMillis() + 100000;
String [] args =
@@ -179,7 +170,6 @@
@Test
public void testParseOptions_timeRange_startTimeEqEndTime() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = 10L;
Long endTime = 10L;
String [] args =
@@ -194,7 +184,6 @@
@Test
public void testParseOptions_timeRange_startTimeGtEndTime() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = 10L;
Long endTime = 1L;
String [] args =
@@ -217,7 +206,6 @@
@Test
public void testIncrcementalVerifyOption() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
IndexTool mockTool = Mockito.mock(IndexTool.class);
when(mockTool.getLastVerifyTime()).thenCallRealMethod();
Long lastVerifyTime = 10L;
@@ -243,7 +231,6 @@
@Test
public void testIncrcementalVerifyOption_notApplicable() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
IndexTool mockTool = Mockito.mock(IndexTool.class);
when(mockTool.getLastVerifyTime()).thenCallRealMethod();
Long lastVerifyTime = 10L;
@@ -266,39 +253,6 @@
}
@Test
- public void testIncrementalVerifyNotSupportedWithoutRawSkipScanFilters() {
- //We should give an exception if we try to use incremental verification on HBase 2.1
- // which lacks HBASE-22710 enabling raw skip scan filters. For 2.2 we assume 2.2.5+
- Assume.assumeFalse(HbaseCompatCapabilities.isRawFilterSupported());
- try {
- IndexTool it = new IndexTool();
- Long lastVerifyTime = 10L;
- String[] args =
- IndexToolIT.getArgValues(true, schema,
- dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.AFTER,
- lastVerifyTime, null, IndexTool.IndexDisableLoggingType.NONE,
- lastVerifyTime);
- it.parseOptions(args);
- Assert.fail("Should have thrown an IllegalStateException");
- } catch (IllegalStateException ise) {
- //eat exception
- }
- //now check retry-verify
- try {
- IndexTool it = new IndexTool();
- Long lastVerifyTime = 10L;
- String[] args =
- IndexToolIT.getArgValues(true, schema,
- dataTable, indexTable, tenantId, IndexTool.IndexVerifyType.AFTER,
- null, null, IndexTool.IndexDisableLoggingType.NONE,
- lastVerifyTime);
- it.parseOptions(args);
- Assert.fail("Should have thrown an IllegalStateException");
- } catch (IllegalStateException ise) {
- //eat exception
- }
- }
- @Test
public void testCheckVerifyAndDisableLogging_defaultsNone() throws Exception {
Long startTime = null;
Long endTime = 10L;
@@ -383,7 +337,6 @@
@Test
public void testIndexToolDefaultSource() throws Exception {
- assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
Long startTime = 1L;
Long endTime = 10L;
String [] args =
@@ -397,7 +350,6 @@
@Test
public void testIndexToolFromIndexSource() throws Exception {
- assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
verifyFromIndexOption(IndexTool.IndexVerifyType.ONLY);
verifyFromIndexOption(IndexTool.IndexVerifyType.BEFORE);
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
index f06376f..10ccdf6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/index/IndexUpgradeToolTest.java
@@ -31,7 +31,6 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.mapreduce.index.IndexTool;
import org.apache.phoenix.mapreduce.index.IndexUpgradeTool;
import org.apache.phoenix.query.QueryServices;
@@ -39,7 +38,6 @@
import org.apache.phoenix.util.PhoenixRuntime;
import org.junit.Assert;
-import org.junit.Assume;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -100,7 +98,6 @@
@Test
public void testIfOptionsArePassedToIndexTool() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
if (!upgrade) {
return;
}
@@ -130,7 +127,6 @@
@Test
public void testMalformedSpacingOptionsArePassedToIndexTool() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isRawFilterSupported());
if (!upgrade) {
return;
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java b/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java
index e3c3e9b..c0894c0 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/index/VerifySingleIndexRowTest.java
@@ -31,11 +31,9 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.compat.hbase.HbaseCompatCapabilities;
import org.apache.phoenix.coprocessor.GlobalIndexRegionScanner;
import org.apache.phoenix.coprocessor.IndexRebuildRegionScanner;
import org.apache.phoenix.coprocessor.IndexToolVerificationResult;
-import org.apache.phoenix.hbase.index.IndexRegionObserver;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.mapreduce.index.IndexVerificationOutputRepository;
import org.apache.phoenix.query.BaseConnectionlessQueryTest;
@@ -43,7 +41,6 @@
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.util.*;
-import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -471,7 +468,6 @@
// We will report such row as a valid row.
@Test
public void testVerifySingleIndexRow_compactionOnIndexTable_atLeastOneExpectedMutationWithinMaxLookBack() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isMaxLookbackTimeSupported());
String dataRowKey = "k1";
byte[] indexRowKey1Bytes = generateIndexRowKey(dataRowKey, "val1");
ManualEnvironmentEdge injectEdge = new ManualEnvironmentEdge();
@@ -531,7 +527,6 @@
// We will report such row as an invalid beyond maxLookBack row.
@Test
public void testVerifySingleIndexRow_compactionOnIndexTable_noExpectedMutationWithinMaxLookBack() throws Exception {
- Assume.assumeTrue(HbaseCompatCapabilities.isMaxLookbackTimeSupported());
String dataRowKey = "k1";
byte[] indexRowKey1Bytes = generateIndexRowKey(dataRowKey, "val1");
List<Mutation> expectedMutations = new ArrayList<>();
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index bb401ab..685bebc 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -122,6 +122,7 @@
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -2090,7 +2091,49 @@
protected synchronized static boolean isAnyStoreRefCountLeaked()
throws IOException {
if (getUtility() != null) {
- return CompatUtil.isAnyStoreRefCountLeaked(getUtility().getAdmin());
+ return isAnyStoreRefCountLeaked(getUtility().getAdmin());
+ }
+ return false;
+ }
+
+ /**
+ * HBase 2.3+ has storeRefCount available in RegionMetrics
+ *
+ * @param admin Admin instance
+ * @return true if any region has refCount leakage
+ * @throws IOException if something went wrong while connecting to Admin
+ */
+ public synchronized static boolean isAnyStoreRefCountLeaked(Admin admin)
+ throws IOException {
+ int retries = 5;
+ while (retries > 0) {
+ boolean isStoreRefCountLeaked = isStoreRefCountLeaked(admin);
+ if (!isStoreRefCountLeaked) {
+ return false;
+ }
+ retries--;
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ LOGGER.error("Interrupted while sleeping", e);
+ break;
+ }
+ }
+ return true;
+ }
+
+ private static boolean isStoreRefCountLeaked(Admin admin)
+ throws IOException {
+ for (ServerName serverName : admin.getRegionServers()) {
+ for (RegionMetrics regionMetrics : admin.getRegionMetrics(serverName)) {
+ int regionTotalRefCount = regionMetrics.getStoreRefCount();
+ if (regionTotalRefCount > 0) {
+ LOGGER.error("Region {} has refCount leak. Total refCount"
+ + " of all storeFiles combined for the region: {}",
+ regionMetrics.getNameAsString(), regionTotalRefCount);
+ return true;
+ }
+ }
}
return false;
}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java b/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java
similarity index 98%
rename from phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
rename to phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java
index 0c7dfd8..cd25806 100644
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/DelegateCell.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.phoenix.compat.hbase.test;
+package org.apache.phoenix.query;
import org.apache.hadoop.hbase.Cell;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
index 67d2371..93e8d3e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compat.hbase.test.DelegateCell;
import org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList;
import org.junit.Test;
diff --git a/phoenix-hbase-compat-2.1.6/pom.xml b/phoenix-hbase-compat-2.1.6/pom.xml
deleted file mode 100644
index 9c92608..0000000
--- a/phoenix-hbase-compat-2.1.6/pom.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project
- xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation=
- "http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix</artifactId>
- <version>5.2.0-SNAPSHOT</version>
- </parent>
-
- <artifactId>phoenix-hbase-compat-2.1.6</artifactId>
-
- <name>Phoenix Hbase 2.1.6 compatibility</name>
- <description>Compatibility module for HBase 2.1.6+</description>
-
- <properties>
- <hbase21.compat.version>2.1.6</hbase21.compat.version>
- </properties>
-
- <dependencies>
- <!-- HBase dependencies -->
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-client</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-common</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-server</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <!-- Override parent dependencyManagement for transitive HBase dependencies -->
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-hadoop-compat</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-hadoop2-compat</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-protocol</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-protocol-shaded</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-zookeeper</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-metrics</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-metrics-api</artifactId>
- <version>${hbase21.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <!-- Build with -Dwithout.tephra fails without this -->
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
-</project>
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
deleted file mode 100644
index 089e44a..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Table;
-
-public abstract class CompatDelegateHTable implements Table {
-
- protected final Table delegate;
-
- public CompatDelegateHTable(Table delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public void mutateRow(RowMutations rm) throws IOException {
- delegate.mutateRow(rm);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
deleted file mode 100644
index 573196e..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Table;
-
-public abstract class CompatOmidTransactionTable implements Table {
-
- @Override
- public void mutateRow(RowMutations rm) throws IOException {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
deleted file mode 100644
index 78068ab..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.TableAuthManager;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-
-public class CompatPermissionUtil {
-
- private CompatPermissionUtil() {
- //Not to be instantiated
- }
-
- public static AccessChecker newAccessChecker(final Configuration conf, ZKWatcher zk) {
- return new AccessChecker(conf, zk);
- }
-
- public static void stopAccessChecker(AccessChecker accessChecker) throws IOException {
- if (accessChecker.getAuthManager() != null) {
- TableAuthManager.release(accessChecker.getAuthManager());
- }
- }
-
- public static String getUserFromUP(UserPermission userPermission) {
- return Bytes.toString(userPermission.getUser());
- }
-
- public static Permission getPermissionFromUP(UserPermission userPermission) {
- return userPermission;
- }
-
- public static boolean authorizeUserTable(AccessChecker accessChecker, User user,
- TableName table, Permission.Action action) {
- if(accessChecker.getAuthManager().userHasAccess(user, table, action)) {
- return true;
- }
- String[] groupNames = user.getGroupNames();
- if (groupNames != null) {
- for (String group : groupNames) {
- if(accessChecker.getAuthManager().groupHasAccess(group, table, action)) {
- return true;
- }
- }
- }
- return false;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
deleted file mode 100644
index 194c4c3..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-
-/**
- * {@link RpcScheduler} that first checks to see if this is an index or metadata update before
- * passing off the call to the delegate {@link RpcScheduler}.
- */
-public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
- protected RpcScheduler delegate;
-
- @Override
- public int getMetaPriorityQueueLength() {
- return this.delegate.getMetaPriorityQueueLength();
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatSteppingSplitPolicy.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatSteppingSplitPolicy.java
deleted file mode 100644
index 5f700b0..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatSteppingSplitPolicy.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy;
-
-public class CompatSteppingSplitPolicy extends SteppingSplitPolicy {
-
- protected boolean canSplit() {
- //dummy
- return false;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
deleted file mode 100644
index 9867ac6..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-
-public class CompatStoreFileReader extends StoreFileReader {
-
- public CompatStoreFileReader(final FileSystem fs, final Path p,
- final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
- boolean primaryReplicaStoreFile, AtomicInteger refCount, final Configuration conf)
- throws IOException {
- super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, false, conf);
- }
-
-}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
deleted file mode 100644
index be773ef..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.util.ChecksumType;
-import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-
-import java.io.IOException;
-
-public class CompatUtil {
-
- private CompatUtil() {
- // Not to be instantiated
- }
-
- public static int getCellSerializedSize(Cell cell) {
- return org.apache.hadoop.hbase.KeyValueUtil.length(cell);
- }
-
- public static ListMultimap<String, ? extends Permission> readPermissions(byte[] data,
- Configuration conf) throws DeserializationException {
- return AccessControlLists.readPermissions(data, conf);
- }
-
- public static HFileContextBuilder withComparator(HFileContextBuilder contextBuilder,
- CellComparatorImpl cellComparator) {
- return contextBuilder;
- }
-
- public static StoreFileWriter.Builder withComparator(StoreFileWriter.Builder builder,
- CellComparatorImpl cellComparator) {
- return builder.withComparator(cellComparator);
- }
-
- public static Scan getScanForTableName(Connection conn, TableName tableName) {
- return MetaTableAccessor.getScanForTableName(conn, tableName);
- }
-
- /**
- * HBase 2.3+ has storeRefCount available in RegionMetrics
- *
- * @param admin Admin instance
- * @return true if any region has refCount leakage
- * @throws IOException if something went wrong while connecting to Admin
- */
- public static boolean isAnyStoreRefCountLeaked(Admin admin)
- throws IOException {
- return false;
- }
-
- public static ChecksumType getChecksumType(Configuration conf) {
- return HStore.getChecksumType(conf);
- }
-
- public static int getBytesPerChecksum(Configuration conf) {
- return HStore.getBytesPerChecksum(conf);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
deleted file mode 100644
index 6931eb4..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.compat.hbase;
-
-public class HbaseCompatCapabilities {
-
- public static boolean isMaxLookbackTimeSupported() {
- return false;
- }
-
- //In HBase 2.1 and 2.2, a lookback query won't return any results if covered by a future delete
- public static boolean isLookbackBeyondDeletesSupported() { return false; }
-
- //HBase 2.1 does not have HBASE-22710, which is necessary for raw scan skip scan and
- // AllVersionsIndexRebuild filters to
- // show all versions properly. HBase 2.2.5+ and HBase 2.3.0+ have this fix.
- public static boolean isRawFilterSupported() { return false; }
-
- //HBase 2.3+ has preWALAppend() on RegionObserver (HBASE-22623)
- public static boolean hasPreWALAppend() { return false; }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
deleted file mode 100644
index e0e7258..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class OffsetCell implements Cell {
-
- private Cell cell;
- private int offset;
-
- public OffsetCell(Cell cell, int offset) {
- this.cell = cell;
- this.offset = offset;
- }
-
- @Override
- public byte[] getRowArray() {
- return cell.getRowArray();
- }
-
- @Override
- public int getRowOffset() {
- return cell.getRowOffset() + offset;
- }
-
- @Override
- public short getRowLength() {
- return (short) (cell.getRowLength() - offset);
- }
-
- @Override
- public byte[] getFamilyArray() {
- return cell.getFamilyArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return cell.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return cell.getFamilyLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return cell.getQualifierArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return cell.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return cell.getQualifierLength();
- }
-
- @Override
- public long getTimestamp() {
- return cell.getTimestamp();
- }
-
- @Override
- public byte getTypeByte() {
- return cell.getTypeByte();
- }
-
- @Override public long getSequenceId() {
- return cell.getSequenceId();
- }
-
- @Override
- public byte[] getValueArray() {
- return cell.getValueArray();
- }
-
- @Override
- public int getValueOffset() {
- return cell.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return cell.getValueLength();
- }
-
- @Override
- public byte[] getTagsArray() {
- return cell.getTagsArray();
- }
-
- @Override
- public int getTagsOffset() {
- return cell.getTagsOffset();
- }
-
- @Override
- public int getTagsLength() {
- return cell.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return cell.getType();
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
deleted file mode 100644
index a3d12a4..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.ScanOptions;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-
-import java.io.IOException;
-
-public class CompatBaseScannerRegionObserver implements RegionObserver {
-
- public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY =
- "phoenix.max.lookback.age.seconds";
- public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0;
-
- public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't implement the "max
- //lookback age" feature
- }
-
- public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't implement the "max
- //lookback age" feature
- }
-
- public void preMemStoreCompactionCompactScannerOpen(
- ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
- throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't implement the "max
- //lookback age" feature
- }
-
- public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
- ScanOptions options) throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't override the scan
- //to "look behind" delete markers on SCN queries
- }
-
- public static long getMaxLookbackInMillis(Configuration conf){
- //config param is in seconds, switch to millis
- return conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000;
- }
-
- //max lookback age isn't supported in HBase 2.1 or HBase 2.2
- public static boolean isMaxLookbackTimeEnabled(Configuration conf){
- return false;
- }
-
- public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){
- return false;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
deleted file mode 100644
index b552e81..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class CompatIndexRegionObserver implements RegionObserver {
-
- public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> c, WALKey key,
- WALEdit edit) {
- //no-op implementation for HBase 2.1 and 2.2 that doesn't support this co-proc hook.
- }
-
- public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) {
- //no-op for HBase 2.1 and 2.2 because we don't have WALKey.addExtendedAttribute(String,
- // byte[])
- }
-
- public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) {
- return null;
- }
-
- public static Map<String, byte[]> getAttributeValuesFromWALKey(WALKey key) {
- return new HashMap<String, byte[]>();
- }
-}
diff --git a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
deleted file mode 100644
index ed0079d..0000000
--- a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.test;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class DelegateCell implements Cell {
- private final Cell delegate;
- private final String name;
- public DelegateCell(Cell delegate, String name) {
- this.delegate = delegate;
- this.name = name;
- }
-
- @Override
- public int getValueOffset() {
- return delegate.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return delegate.getValueLength();
- }
-
- @Override
- public byte[] getValueArray() {
- return delegate.getValueArray();
- }
-
- @Override
- public byte getTypeByte() {
- return delegate.getTypeByte();
- }
-
- @Override
- public long getTimestamp() {
- return delegate.getTimestamp();
- }
-
- @Override
- public int getTagsOffset() {
- return delegate.getTagsOffset();
- }
-
- @Override
- public byte[] getTagsArray() {
- return delegate.getTagsArray();
- }
-
- @Override
- public int getRowOffset() {
- return delegate.getRowOffset();
- }
-
- @Override
- public short getRowLength() {
- return delegate.getRowLength();
- }
-
- @Override
- public byte[] getRowArray() {
- return delegate.getRowArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return delegate.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return delegate.getQualifierLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return delegate.getQualifierArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return delegate.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return delegate.getFamilyLength();
- }
-
- @Override
- public byte[] getFamilyArray() {
- return delegate.getFamilyArray();
- }
-
- @Override
- public String toString() {
- return name;
- }
-
- @Override
- public long getSequenceId() {
- return delegate.getSequenceId();
- }
-
- @Override
- public int getTagsLength() {
- return delegate.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return delegate.getType();
- }
-}
diff --git a/phoenix-hbase-compat-2.2.5/pom.xml b/phoenix-hbase-compat-2.2.5/pom.xml
deleted file mode 100644
index a0bd138..0000000
--- a/phoenix-hbase-compat-2.2.5/pom.xml
+++ /dev/null
@@ -1,108 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<project
- xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation=
- "http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.apache.phoenix</groupId>
- <artifactId>phoenix</artifactId>
- <version>5.2.0-SNAPSHOT</version>
- </parent>
-
- <artifactId>phoenix-hbase-compat-2.2.5</artifactId>
- <name>Phoenix Hbase 2.2.5 compatibility</name>
- <description>Compatibility module for HBase 2.2.5+</description>
-
- <properties>
- <hbase22.compat.version>2.2.5</hbase22.compat.version>
- </properties>
-
- <dependencies>
- <!-- HBase dependencies -->
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-client</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-common</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-server</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <!-- Override parent dependencyManagement for transitive HBase dependencies -->
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-hadoop-compat</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-hadoop2-compat</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-protocol</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-protocol-shaded</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-zookeeper</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-metrics</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-metrics-api</artifactId>
- <version>${hbase22.compat.version}</version>
- <scope>provided</scope>
- </dependency>
- <!-- Build with -Dwithout.tephra fails without this -->
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
-</project>
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
deleted file mode 100644
index a862073..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Table;
-
-public abstract class CompatDelegateHTable implements Table {
-
- protected final Table delegate;
-
- public CompatDelegateHTable(Table delegate) {
- this.delegate = delegate;
- }
-
- @Override
- public void mutateRow(RowMutations rm) throws IOException {
- delegate.mutateRow(rm);
- }
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
deleted file mode 100644
index 573196e..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Table;
-
-public abstract class CompatOmidTransactionTable implements Table {
-
- @Override
- public void mutateRow(RowMutations rm) throws IOException {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
deleted file mode 100644
index 4a13cfd..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-
-public class CompatPermissionUtil {
-
- private CompatPermissionUtil() {
- //Not to be instantiated
- }
-
- public static AccessChecker newAccessChecker(final Configuration conf, ZKWatcher zk) {
- return new AccessChecker(conf, zk);
- }
-
- public static void stopAccessChecker(AccessChecker accessChecker) throws IOException {
- accessChecker.stop();
- }
-
- public static String getUserFromUP(UserPermission userPermission) {
- return userPermission.getUser();
- }
-
- public static Permission getPermissionFromUP(UserPermission userPermission) {
- return userPermission.getPermission();
- }
-
- public static boolean authorizeUserTable(AccessChecker accessChecker, User user,
- TableName table, Permission.Action action) {
- // This also checks for group access
- return accessChecker.getAuthManager().authorizeUserTable(user, table, action);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
deleted file mode 100644
index b749a25..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-
-/**
- * {@link RpcScheduler} that first checks to see if this is an index or metadata update before
- * passing off the call to the delegate {@link RpcScheduler}.
- */
-public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
- protected RpcScheduler delegate;
-
- @Override
- public int getMetaPriorityQueueLength() {
- return this.delegate.getMetaPriorityQueueLength();
- }
-
- @Override
- public int getActiveGeneralRpcHandlerCount() {
- return this.delegate.getActiveGeneralRpcHandlerCount();
- }
-
- @Override
- public int getActivePriorityRpcHandlerCount() {
- return this.delegate.getActivePriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveMetaPriorityRpcHandlerCount() {
- return this.delegate.getActiveMetaPriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveReplicationRpcHandlerCount() {
- return this.delegate.getActiveReplicationRpcHandlerCount();
- }
-
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatSteppingSplitPolicy.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatSteppingSplitPolicy.java
deleted file mode 100644
index 5f700b0..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatSteppingSplitPolicy.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy;
-
-public class CompatSteppingSplitPolicy extends SteppingSplitPolicy {
-
- protected boolean canSplit() {
- //dummy
- return false;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
deleted file mode 100644
index 9867ac6..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-
-public class CompatStoreFileReader extends StoreFileReader {
-
- public CompatStoreFileReader(final FileSystem fs, final Path p,
- final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
- boolean primaryReplicaStoreFile, AtomicInteger refCount, final Configuration conf)
- throws IOException {
- super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, false, conf);
- }
-
-}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
deleted file mode 100644
index be4b2d4..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.util.ChecksumType;
-import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-
-import java.io.IOException;
-
-public class CompatUtil {
-
- private CompatUtil() {
- // Not to be instantiated
- }
-
- public static int getCellSerializedSize(Cell cell) {
- return cell.getSerializedSize();
- }
-
- public static ListMultimap<String, ? extends Permission> readPermissions(byte[] data,
- Configuration conf) throws DeserializationException {
- return AccessControlLists.readPermissions(data, conf);
- }
-
- public static HFileContextBuilder withComparator(HFileContextBuilder contextBuilder,
- CellComparatorImpl cellComparator) {
- return contextBuilder;
- }
-
- public static StoreFileWriter.Builder withComparator(StoreFileWriter.Builder builder,
- CellComparatorImpl cellComparator) {
- return builder.withComparator(cellComparator);
- }
-
- public static Scan getScanForTableName(Connection conn, TableName tableName) {
- return MetaTableAccessor.getScanForTableName(conn, tableName);
- }
-
- /**
- * HBase 2.3+ has storeRefCount available in RegionMetrics
- *
- * @param admin Admin instance
- * @return true if any region has refCount leakage
- * @throws IOException if something went wrong while connecting to Admin
- */
- public static boolean isAnyStoreRefCountLeaked(Admin admin)
- throws IOException {
- return false;
- }
-
- public static ChecksumType getChecksumType(Configuration conf) {
- return HStore.getChecksumType(conf);
- }
-
- public static int getBytesPerChecksum(Configuration conf) {
- return HStore.getBytesPerChecksum(conf);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
deleted file mode 100644
index 620d4a7..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.phoenix.compat.hbase;
-
-public class HbaseCompatCapabilities {
-
- public static boolean isMaxLookbackTimeSupported() {
- return false;
- }
-
- //In HBase 2.1 and 2.2, a lookback query won't return any results if covered by a future delete
- public static boolean isLookbackBeyondDeletesSupported() { return false; }
-
- //HBase 2.1 does not have HBASE-22710, which is necessary for raw scan skip scan and
- // AllVersionsIndexRebuild filters to
- // show all versions properly. HBase 2.2.5+ and HBase 2.3.0+ have this fix.
- public static boolean isRawFilterSupported() { return true; }
-
- //HBase 2.3+ has preWALAppend() on RegionObserver (HBASE-22623)
- public static boolean hasPreWALAppend() { return false; }
-
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
deleted file mode 100644
index 2f901ae..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.ScanOptions;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-
-import java.io.IOException;
-
-public class CompatBaseScannerRegionObserver implements RegionObserver {
-
- public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY =
- "phoenix.max.lookback.age.seconds";
- public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0;
-
- public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't implement the "max
- //lookback age" feature
- }
-
- public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't implement the "max
- //lookback age" feature
- }
-
- public void preMemStoreCompactionCompactScannerOpen(
- ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
- throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't implement the "max
- //lookback age" feature
- }
-
- public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
- ScanOptions options) throws IOException {
- //no-op because HBASE-24321 isn't present in HBase 2.1.x, so we can't override the scan
- //to "look behind" delete markers on SCN queries
- }
-
- public static long getMaxLookbackInMillis(Configuration conf){
- //config param is in seconds, switch to millis
- return conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000;
- }
-
- //max lookback age isn't supported in HBase 2.1 or HBase 2.2
- public static boolean isMaxLookbackTimeEnabled(Configuration conf){
- return false;
- }
-
- public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){
- return false;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
deleted file mode 100644
index b552e81..0000000
--- a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class CompatIndexRegionObserver implements RegionObserver {
-
- public void preWALAppend(ObserverContext<RegionCoprocessorEnvironment> c, WALKey key,
- WALEdit edit) {
- //no-op implementation for HBase 2.1 and 2.2 that doesn't support this co-proc hook.
- }
-
- public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) {
- //no-op for HBase 2.1 and 2.2 because we don't have WALKey.addExtendedAttribute(String,
- // byte[])
- }
-
- public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) {
- return null;
- }
-
- public static Map<String, byte[]> getAttributeValuesFromWALKey(WALKey key) {
- return new HashMap<String, byte[]>();
- }
-}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
index d7ae605..a862073 100644
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
+++ b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
@@ -19,7 +19,6 @@
import java.io.IOException;
-import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
@@ -32,11 +31,6 @@
}
@Override
- public RegionLocator getRegionLocator() throws IOException {
- return delegate.getRegionLocator();
- }
-
- @Override
public void mutateRow(RowMutations rm) throws IOException {
delegate.mutateRow(rm);
}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
index 8baa2f3..573196e 100644
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
+++ b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
@@ -19,18 +19,12 @@
import java.io.IOException;
-import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
public abstract class CompatOmidTransactionTable implements Table {
@Override
- public RegionLocator getRegionLocator() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
public void mutateRow(RowMutations rm) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
deleted file mode 100644
index 80a99b3..0000000
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-
-public class CompatPermissionUtil {
-
- private CompatPermissionUtil() {
- //Not to be instantiated
- }
-
- public static AccessChecker newAccessChecker(final Configuration conf, ZKWatcher zk) {
- //Ignore ZK parameter
- return new AccessChecker(conf);
- }
-
- public static void stopAccessChecker(AccessChecker accessChecker) throws IOException {
- //NOOP
- }
-
- public static String getUserFromUP(UserPermission userPermission) {
- return userPermission.getUser();
- }
-
- public static Permission getPermissionFromUP(UserPermission userPermission) {
- return userPermission.getPermission();
- }
-
- public static boolean authorizeUserTable(AccessChecker accessChecker, User user,
- TableName table, Permission.Action action) {
- // This also checks for group access
- return accessChecker.getAuthManager().authorizeUserTable(user, table, action);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
index b749a25..5f0bc0d 100644
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
+++ b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
@@ -25,30 +25,5 @@
*/
public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
protected RpcScheduler delegate;
-
- @Override
- public int getMetaPriorityQueueLength() {
- return this.delegate.getMetaPriorityQueueLength();
- }
-
- @Override
- public int getActiveGeneralRpcHandlerCount() {
- return this.delegate.getActiveGeneralRpcHandlerCount();
- }
-
- @Override
- public int getActivePriorityRpcHandlerCount() {
- return this.delegate.getActivePriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveMetaPriorityRpcHandlerCount() {
- return this.delegate.getActiveMetaPriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveReplicationRpcHandlerCount() {
- return this.delegate.getActiveReplicationRpcHandlerCount();
- }
-
}
+
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
deleted file mode 100644
index 03aa257..0000000
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileInfo;
-import org.apache.hadoop.hbase.io.hfile.ReaderContext;
-import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
-import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-
-public class CompatStoreFileReader extends StoreFileReader {
-
- public CompatStoreFileReader(final FileSystem fs, final Path p,
- final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
- boolean primaryReplicaStoreFile, AtomicInteger refCount, final Configuration conf)
- throws IOException {
- super(new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile,
- ReaderType.STREAM),
- new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs),
- primaryReplicaStoreFile, ReaderType.STREAM), conf),
- cacheConf, refCount, conf);
- getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader());
- }
-
-}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 8cfd60b..c76fe95 100644
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -54,15 +54,6 @@
//Not to be instantiated
}
- public static int getCellSerializedSize(Cell cell) {
- return cell.getSerializedSize();
- }
-
- public static ListMultimap<String, ? extends Permission> readPermissions(
- byte[] data, Configuration conf) throws DeserializationException {
- return PermissionStorage.readPermissions(data, conf);
- }
-
public static HFileContext createHFileContext(Configuration conf, Algorithm compression,
Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) {
@@ -75,62 +66,10 @@
.build();
}
- public static HFileContextBuilder withComparator(HFileContextBuilder contextBuilder,
- CellComparatorImpl cellComparator) {
- return contextBuilder.withCellComparator(cellComparator);
- }
-
- public static StoreFileWriter.Builder withComparator(StoreFileWriter.Builder builder,
- CellComparatorImpl cellComparator) {
- return builder;
- }
-
public static Scan getScanForTableName(Connection conn, TableName tableName) {
return MetaTableAccessor.getScanForTableName(conn, tableName);
}
- /**
- * HBase 2.3+ has storeRefCount available in RegionMetrics
- *
- * @param admin Admin instance
- * @return true if any region has refCount leakage
- * @throws IOException if something went wrong while connecting to Admin
- */
- public synchronized static boolean isAnyStoreRefCountLeaked(Admin admin)
- throws IOException {
- int retries = 5;
- while (retries > 0) {
- boolean isStoreRefCountLeaked = isStoreRefCountLeaked(admin);
- if (!isStoreRefCountLeaked) {
- return false;
- }
- retries--;
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- LOGGER.error("Interrupted while sleeping", e);
- break;
- }
- }
- return true;
- }
-
- private static boolean isStoreRefCountLeaked(Admin admin)
- throws IOException {
- for (ServerName serverName : admin.getRegionServers()) {
- for (RegionMetrics regionMetrics : admin.getRegionMetrics(serverName)) {
- int regionTotalRefCount = regionMetrics.getStoreRefCount();
- if (regionTotalRefCount > 0) {
- LOGGER.error("Region {} has refCount leak. Total refCount"
- + " of all storeFiles combined for the region: {}",
- regionMetrics.getNameAsString(), regionTotalRefCount);
- return true;
- }
- }
- }
- return false;
- }
-
public static ChecksumType getChecksumType(Configuration conf) {
return HStore.getChecksumType(conf);
}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
index 2cb9cdf..bf4c3c9 100644
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
+++ b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
@@ -20,20 +20,7 @@
public class HbaseCompatCapabilities {
- public static boolean isMaxLookbackTimeSupported() {
- return true;
- }
+ // Currently each supported HBase version has the same capabilities, so there is
+ // nothing in here.
- //In HBase 2.1 and 2.2, a lookback query won't return any results if covered by a future delete,
- //but in 2.3 and later we have the preSoreScannerOpen hook that overrides that behavior
- public static boolean isLookbackBeyondDeletesSupported() { return true; }
-
- //HBase 2.1 does not have HBASE-22710, which is necessary for raw scan skip scan and
- // AllVersionsIndexRebuild filters to
- // show all versions properly. HBase 2.2.5+ and HBase 2.3.0+ have this fix.
- public static boolean isRawFilterSupported() { return true; }
-
- //HBase 2.3+ has preWALAppend() on RegionObserver (HBASE-22623)
- public static boolean hasPreWALAppend() { return true; }
-
-}
+}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
deleted file mode 100644
index c5485a5..0000000
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class OffsetCell implements Cell {
-
- private Cell cell;
- private int offset;
-
- public OffsetCell(Cell cell, int offset) {
- this.cell = cell;
- this.offset = offset;
- }
-
- @Override
- public byte[] getRowArray() {
- return cell.getRowArray();
- }
-
- @Override
- public int getRowOffset() {
- return cell.getRowOffset() + offset;
- }
-
- @Override
- public short getRowLength() {
- return (short) (cell.getRowLength() - offset);
- }
-
- @Override
- public byte[] getFamilyArray() {
- return cell.getFamilyArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return cell.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return cell.getFamilyLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return cell.getQualifierArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return cell.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return cell.getQualifierLength();
- }
-
- @Override
- public long getTimestamp() {
- return cell.getTimestamp();
- }
-
- @Override
- public byte getTypeByte() {
- return cell.getTypeByte();
- }
-
- @Override public long getSequenceId() {
- return cell.getSequenceId();
- }
-
- @Override
- public byte[] getValueArray() {
- return cell.getValueArray();
- }
-
- @Override
- public int getValueOffset() {
- return cell.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return cell.getValueLength();
- }
-
- @Override
- public byte[] getTagsArray() {
- return cell.getTagsArray();
- }
-
- @Override
- public int getTagsOffset() {
- return cell.getTagsOffset();
- }
-
- @Override
- public int getTagsLength() {
- return cell.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return cell.getType();
- }
-
- @Override
- public long heapSize() {
- return cell.heapSize();
- }
-
- @Override
- public int getSerializedSize() {
- return cell.getSerializedSize() - offset;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
deleted file mode 100644
index cd1a7f5..0000000
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeepDeletedCells;
-import org.apache.hadoop.hbase.MemoryCompactionPolicy;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.ScanOptions;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import java.io.IOException;
-
-public class CompatBaseScannerRegionObserver implements RegionObserver {
-
- public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY =
- "phoenix.max.lookback.age.seconds";
- public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0;
-
- public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
- }
- }
-
- public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- setScanOptionsForFlushesAndCompactions(conf, options, store, ScanType.COMPACT_RETAIN_DELETES);
- }
- }
-
- public void preMemStoreCompactionCompactScannerOpen(
- ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
- throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- MemoryCompactionPolicy inMemPolicy =
- store.getColumnFamilyDescriptor().getInMemoryCompaction();
- ScanType scanType;
- //the eager and adaptive in-memory compaction policies can purge versions; the others
- // can't. (Eager always does; adaptive sometimes does)
- if (inMemPolicy.equals(MemoryCompactionPolicy.EAGER) ||
- inMemPolicy.equals(MemoryCompactionPolicy.ADAPTIVE)) {
- scanType = ScanType.COMPACT_DROP_DELETES;
- } else {
- scanType = ScanType.COMPACT_RETAIN_DELETES;
- }
- setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
- }
- }
-
- public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
- ScanOptions options) throws IOException {
-
- if (!storeFileScanDoesntNeedAlteration(options)) {
- //PHOENIX-4277 -- When doing a point-in-time (SCN) Scan, HBase by default will hide
- // mutations that happen before a delete marker. This overrides that behavior.
- options.setMinVersions(options.getMinVersions());
- KeepDeletedCells keepDeletedCells = KeepDeletedCells.TRUE;
- if (store.getColumnFamilyDescriptor().getTimeToLive() != HConstants.FOREVER) {
- keepDeletedCells = KeepDeletedCells.TTL;
- }
- options.setKeepDeletedCells(keepDeletedCells);
- }
- }
-
- private boolean storeFileScanDoesntNeedAlteration(ScanOptions options) {
- Scan scan = options.getScan();
- boolean isRaw = scan.isRaw();
- //true if keep deleted cells is either TRUE or TTL
- boolean keepDeletedCells = options.getKeepDeletedCells().equals(KeepDeletedCells.TRUE) ||
- options.getKeepDeletedCells().equals(KeepDeletedCells.TTL);
- boolean timeRangeIsLatest = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP;
- boolean timestampIsTransactional =
- isTransactionalTimestamp(scan.getTimeRange().getMax());
- return isRaw
- || keepDeletedCells
- || timeRangeIsLatest
- || timestampIsTransactional;
- }
-
- private boolean isTransactionalTimestamp(long ts) {
- //have to use the HBase edge manager because the Phoenix one is in phoenix-core
- return ts > (long) (EnvironmentEdgeManager.currentTime() * 1.1);
- }
-
- /*
- * If KeepDeletedCells.FALSE, KeepDeletedCells.TTL ,
- * let delete markers age once lookback age is done.
- */
- public KeepDeletedCells getKeepDeletedCells(ScanOptions options, ScanType scanType) {
- //if we're doing a minor compaction or flush, always set keep deleted cells
- //to true. Otherwise, if keep deleted cells is false or TTL, use KeepDeletedCells TTL,
- //where the value of the ttl might be overriden to the max lookback age elsewhere
- return (options.getKeepDeletedCells() == KeepDeletedCells.TRUE
- || scanType.equals(ScanType.COMPACT_RETAIN_DELETES)) ?
- KeepDeletedCells.TRUE : KeepDeletedCells.TTL;
- }
-
- /*
- * if the user set a TTL we should leave MIN_VERSIONS at the default (0 in most of the cases).
- * Otherwise the data (1st version) will not be removed after the TTL. If no TTL, we want
- * Math.max(maxVersions, minVersions, 1)
- */
- public int getMinVersions(ScanOptions options, ColumnFamilyDescriptor cfDescriptor) {
- return cfDescriptor.getTimeToLive() != HConstants.FOREVER ? options.getMinVersions()
- : Math.max(Math.max(options.getMinVersions(),
- cfDescriptor.getMaxVersions()),1);
- }
-
- /**
- *
- * @param conf HBase Configuration
- * @param columnDescriptor ColumnFamilyDescriptor for the store being compacted
- * @param options ScanOptions of overrides to the compaction scan
- * @return Time to live in milliseconds, based on both HBase TTL and Phoenix max lookback age
- */
- public long getTimeToLiveForCompactions(Configuration conf,
- ColumnFamilyDescriptor columnDescriptor,
- ScanOptions options) {
- long ttlConfigured = columnDescriptor.getTimeToLive();
- long ttlInMillis = ttlConfigured * 1000;
- long maxLookbackTtl = getMaxLookbackInMillis(conf);
- if (isMaxLookbackTimeEnabled(maxLookbackTtl)) {
- if (ttlConfigured == HConstants.FOREVER
- && columnDescriptor.getKeepDeletedCells() != KeepDeletedCells.TRUE) {
- // If user configured default TTL(FOREVER) and keep deleted cells to false or
- // TTL then to remove unwanted delete markers we should change ttl to max lookback age
- ttlInMillis = maxLookbackTtl;
- } else {
- //if there is a TTL, use TTL instead of max lookback age.
- // Max lookback age should be more recent or equal to TTL
- ttlInMillis = Math.max(ttlInMillis, maxLookbackTtl);
- }
- }
-
- return ttlInMillis;
- }
-
- public void setScanOptionsForFlushesAndCompactions(Configuration conf,
- ScanOptions options,
- final Store store,
- ScanType type) {
- ColumnFamilyDescriptor cfDescriptor = store.getColumnFamilyDescriptor();
- options.setTTL(getTimeToLiveForCompactions(conf, cfDescriptor,
- options));
- options.setKeepDeletedCells(getKeepDeletedCells(options, type));
- options.setMaxVersions(Integer.MAX_VALUE);
- options.setMinVersions(getMinVersions(options, cfDescriptor));
- }
-
- public static long getMaxLookbackInMillis(Configuration conf){
- //config param is in seconds, switch to millis
- return conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000;
- }
-
- public static boolean isMaxLookbackTimeEnabled(Configuration conf){
- return isMaxLookbackTimeEnabled(conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE));
- }
-
- public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){
- return maxLookbackTime > 0L;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
deleted file mode 100644
index f887ed3..0000000
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.wal.WALKey;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class CompatIndexRegionObserver implements RegionObserver {
-
- public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) {
- key.addExtendedAttribute(attrKey, attrValue);
- }
-
- public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) {
- return key.getExtendedAttribute(attrKey);
- }
-
- public static Map<String, byte[]> getAttributeValuesFromWALKey(WALKey key) {
- return new HashMap<String, byte[]>(key.getExtendedAttributes());
- }
-
-}
diff --git a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
deleted file mode 100644
index 0c7dfd8..0000000
--- a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.test;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class DelegateCell implements Cell {
- private final Cell delegate;
- private final String name;
- public DelegateCell(Cell delegate, String name) {
- this.delegate = delegate;
- this.name = name;
- }
-
- @Override
- public int getValueOffset() {
- return delegate.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return delegate.getValueLength();
- }
-
- @Override
- public byte[] getValueArray() {
- return delegate.getValueArray();
- }
-
- @Override
- public byte getTypeByte() {
- return delegate.getTypeByte();
- }
-
- @Override
- public long getTimestamp() {
- return delegate.getTimestamp();
- }
-
- @Override
- public int getTagsOffset() {
- return delegate.getTagsOffset();
- }
-
- @Override
- public byte[] getTagsArray() {
- return delegate.getTagsArray();
- }
-
- @Override
- public int getRowOffset() {
- return delegate.getRowOffset();
- }
-
- @Override
- public short getRowLength() {
- return delegate.getRowLength();
- }
-
- @Override
- public byte[] getRowArray() {
- return delegate.getRowArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return delegate.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return delegate.getQualifierLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return delegate.getQualifierArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return delegate.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return delegate.getFamilyLength();
- }
-
- @Override
- public byte[] getFamilyArray() {
- return delegate.getFamilyArray();
- }
-
- @Override
- public String toString() {
- return name;
- }
-
- @Override
- public long getSequenceId() {
- return delegate.getSequenceId();
- }
-
- @Override
- public int getTagsLength() {
- return delegate.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return delegate.getType();
- }
-
- @Override
- public long heapSize() {
- return delegate.heapSize();
- }
-
- @Override
- public int getSerializedSize() {
- return delegate.getSerializedSize();
- }
-}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
index a255732..b6e360c 100644
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
+++ b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
@@ -19,7 +19,6 @@
import java.io.IOException;
-import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
@@ -33,11 +32,6 @@
}
@Override
- public RegionLocator getRegionLocator() throws IOException {
- return delegate.getRegionLocator();
- }
-
- @Override
public Result mutateRow(RowMutations rm) throws IOException {
return delegate.mutateRow(rm);
}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
index ae7992f..bddc7a5 100644
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
+++ b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
@@ -19,7 +19,6 @@
import java.io.IOException;
-import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
@@ -27,11 +26,6 @@
public abstract class CompatOmidTransactionTable implements Table {
@Override
- public RegionLocator getRegionLocator() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
public Result mutateRow(RowMutations rm) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
deleted file mode 100644
index 80a99b3..0000000
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-
-public class CompatPermissionUtil {
-
- private CompatPermissionUtil() {
- //Not to be instantiated
- }
-
- public static AccessChecker newAccessChecker(final Configuration conf, ZKWatcher zk) {
- //Ignore ZK parameter
- return new AccessChecker(conf);
- }
-
- public static void stopAccessChecker(AccessChecker accessChecker) throws IOException {
- //NOOP
- }
-
- public static String getUserFromUP(UserPermission userPermission) {
- return userPermission.getUser();
- }
-
- public static Permission getPermissionFromUP(UserPermission userPermission) {
- return userPermission.getPermission();
- }
-
- public static boolean authorizeUserTable(AccessChecker accessChecker, User user,
- TableName table, Permission.Action action) {
- // This also checks for group access
- return accessChecker.getAuthManager().authorizeUserTable(user, table, action);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
index b749a25..4ed9c91 100644
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
+++ b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
@@ -25,30 +25,4 @@
*/
public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
protected RpcScheduler delegate;
-
- @Override
- public int getMetaPriorityQueueLength() {
- return this.delegate.getMetaPriorityQueueLength();
- }
-
- @Override
- public int getActiveGeneralRpcHandlerCount() {
- return this.delegate.getActiveGeneralRpcHandlerCount();
- }
-
- @Override
- public int getActivePriorityRpcHandlerCount() {
- return this.delegate.getActivePriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveMetaPriorityRpcHandlerCount() {
- return this.delegate.getActiveMetaPriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveReplicationRpcHandlerCount() {
- return this.delegate.getActiveReplicationRpcHandlerCount();
- }
-
}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
deleted file mode 100644
index 03aa257..0000000
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileInfo;
-import org.apache.hadoop.hbase.io.hfile.ReaderContext;
-import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
-import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-
-public class CompatStoreFileReader extends StoreFileReader {
-
- public CompatStoreFileReader(final FileSystem fs, final Path p,
- final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
- boolean primaryReplicaStoreFile, AtomicInteger refCount, final Configuration conf)
- throws IOException {
- super(new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile,
- ReaderType.STREAM),
- new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs),
- primaryReplicaStoreFile, ReaderType.STREAM), conf),
- cacheConf, refCount, conf);
- getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader());
- }
-
-}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 2159550..46966af 100644
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -54,15 +54,6 @@
//Not to be instantiated
}
- public static int getCellSerializedSize(Cell cell) {
- return cell.getSerializedSize();
- }
-
- public static ListMultimap<String, ? extends Permission> readPermissions(
- byte[] data, Configuration conf) throws DeserializationException {
- return PermissionStorage.readPermissions(data, conf);
- }
-
public static HFileContext createHFileContext(Configuration conf, Algorithm compression,
Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) {
@@ -75,63 +66,10 @@
.build();
}
- public static HFileContextBuilder withComparator(HFileContextBuilder contextBuilder,
- CellComparatorImpl cellComparator) {
- return contextBuilder.withCellComparator(cellComparator);
- }
-
- public static StoreFileWriter.Builder withComparator(StoreFileWriter.Builder builder,
- CellComparatorImpl cellComparator) {
- return builder;
- }
-
public static Scan getScanForTableName(Connection conn, TableName tableName) {
return MetaTableAccessor.getScanForTableName(conn.getConfiguration(), tableName);
}
-
- /**
- * HBase 2.3+ has storeRefCount available in RegionMetrics
- *
- * @param admin Admin instance
- * @return true if any region has refCount leakage
- * @throws IOException if something went wrong while connecting to Admin
- */
- public synchronized static boolean isAnyStoreRefCountLeaked(Admin admin)
- throws IOException {
- int retries = 5;
- while (retries > 0) {
- boolean isStoreRefCountLeaked = isStoreRefCountLeaked(admin);
- if (!isStoreRefCountLeaked) {
- return false;
- }
- retries--;
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- LOGGER.error("Interrupted while sleeping", e);
- break;
- }
- }
- return true;
- }
-
- private static boolean isStoreRefCountLeaked(Admin admin)
- throws IOException {
- for (ServerName serverName : admin.getRegionServers()) {
- for (RegionMetrics regionMetrics : admin.getRegionMetrics(serverName)) {
- int regionTotalRefCount = regionMetrics.getStoreRefCount();
- if (regionTotalRefCount > 0) {
- LOGGER.error("Region {} has refCount leak. Total refCount"
- + " of all storeFiles combined for the region: {}",
- regionMetrics.getNameAsString(), regionTotalRefCount);
- return true;
- }
- }
- }
- return false;
- }
-
public static ChecksumType getChecksumType(Configuration conf) {
return HStore.getChecksumType(conf);
}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
index 80aafe6..f63e744 100644
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
+++ b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
@@ -20,20 +20,7 @@
public class HbaseCompatCapabilities {
- public static boolean isMaxLookbackTimeSupported() {
- return true;
- }
-
- //In HBase 2.1 and 2.2, a lookback query won't return any results if covered by a future delete,
- //but in 2.3 and later we have the preSoreScannerOpen hook that overrides that behavior
- public static boolean isLookbackBeyondDeletesSupported() { return true; }
-
- //HBase 2.1 does not have HBASE-22710, which is necessary for raw scan skip scan and
- // AllVersionsIndexRebuild filters to
- // show all versions properly. HBase 2.2.5+ and HBase 2.3.0+ have this fix.
- public static boolean isRawFilterSupported() { return true; }
-
- //HBase 2.3+ has preWALAppend() on RegionObserver (HBASE-22623)
- public static boolean hasPreWALAppend() { return true; }
+ // Currently each supported HBase version has the same capabilities, so there is
+ // nothing in here.
}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
deleted file mode 100644
index c5485a5..0000000
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class OffsetCell implements Cell {
-
- private Cell cell;
- private int offset;
-
- public OffsetCell(Cell cell, int offset) {
- this.cell = cell;
- this.offset = offset;
- }
-
- @Override
- public byte[] getRowArray() {
- return cell.getRowArray();
- }
-
- @Override
- public int getRowOffset() {
- return cell.getRowOffset() + offset;
- }
-
- @Override
- public short getRowLength() {
- return (short) (cell.getRowLength() - offset);
- }
-
- @Override
- public byte[] getFamilyArray() {
- return cell.getFamilyArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return cell.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return cell.getFamilyLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return cell.getQualifierArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return cell.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return cell.getQualifierLength();
- }
-
- @Override
- public long getTimestamp() {
- return cell.getTimestamp();
- }
-
- @Override
- public byte getTypeByte() {
- return cell.getTypeByte();
- }
-
- @Override public long getSequenceId() {
- return cell.getSequenceId();
- }
-
- @Override
- public byte[] getValueArray() {
- return cell.getValueArray();
- }
-
- @Override
- public int getValueOffset() {
- return cell.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return cell.getValueLength();
- }
-
- @Override
- public byte[] getTagsArray() {
- return cell.getTagsArray();
- }
-
- @Override
- public int getTagsOffset() {
- return cell.getTagsOffset();
- }
-
- @Override
- public int getTagsLength() {
- return cell.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return cell.getType();
- }
-
- @Override
- public long heapSize() {
- return cell.heapSize();
- }
-
- @Override
- public int getSerializedSize() {
- return cell.getSerializedSize() - offset;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
deleted file mode 100644
index cd1a7f5..0000000
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeepDeletedCells;
-import org.apache.hadoop.hbase.MemoryCompactionPolicy;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.ScanOptions;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import java.io.IOException;
-
-public class CompatBaseScannerRegionObserver implements RegionObserver {
-
- public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY =
- "phoenix.max.lookback.age.seconds";
- public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0;
-
- public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
- }
- }
-
- public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- setScanOptionsForFlushesAndCompactions(conf, options, store, ScanType.COMPACT_RETAIN_DELETES);
- }
- }
-
- public void preMemStoreCompactionCompactScannerOpen(
- ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
- throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- MemoryCompactionPolicy inMemPolicy =
- store.getColumnFamilyDescriptor().getInMemoryCompaction();
- ScanType scanType;
- //the eager and adaptive in-memory compaction policies can purge versions; the others
- // can't. (Eager always does; adaptive sometimes does)
- if (inMemPolicy.equals(MemoryCompactionPolicy.EAGER) ||
- inMemPolicy.equals(MemoryCompactionPolicy.ADAPTIVE)) {
- scanType = ScanType.COMPACT_DROP_DELETES;
- } else {
- scanType = ScanType.COMPACT_RETAIN_DELETES;
- }
- setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
- }
- }
-
- public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
- ScanOptions options) throws IOException {
-
- if (!storeFileScanDoesntNeedAlteration(options)) {
- //PHOENIX-4277 -- When doing a point-in-time (SCN) Scan, HBase by default will hide
- // mutations that happen before a delete marker. This overrides that behavior.
- options.setMinVersions(options.getMinVersions());
- KeepDeletedCells keepDeletedCells = KeepDeletedCells.TRUE;
- if (store.getColumnFamilyDescriptor().getTimeToLive() != HConstants.FOREVER) {
- keepDeletedCells = KeepDeletedCells.TTL;
- }
- options.setKeepDeletedCells(keepDeletedCells);
- }
- }
-
- private boolean storeFileScanDoesntNeedAlteration(ScanOptions options) {
- Scan scan = options.getScan();
- boolean isRaw = scan.isRaw();
- //true if keep deleted cells is either TRUE or TTL
- boolean keepDeletedCells = options.getKeepDeletedCells().equals(KeepDeletedCells.TRUE) ||
- options.getKeepDeletedCells().equals(KeepDeletedCells.TTL);
- boolean timeRangeIsLatest = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP;
- boolean timestampIsTransactional =
- isTransactionalTimestamp(scan.getTimeRange().getMax());
- return isRaw
- || keepDeletedCells
- || timeRangeIsLatest
- || timestampIsTransactional;
- }
-
- private boolean isTransactionalTimestamp(long ts) {
- //have to use the HBase edge manager because the Phoenix one is in phoenix-core
- return ts > (long) (EnvironmentEdgeManager.currentTime() * 1.1);
- }
-
- /*
- * If KeepDeletedCells.FALSE, KeepDeletedCells.TTL ,
- * let delete markers age once lookback age is done.
- */
- public KeepDeletedCells getKeepDeletedCells(ScanOptions options, ScanType scanType) {
- //if we're doing a minor compaction or flush, always set keep deleted cells
- //to true. Otherwise, if keep deleted cells is false or TTL, use KeepDeletedCells TTL,
- //where the value of the ttl might be overriden to the max lookback age elsewhere
- return (options.getKeepDeletedCells() == KeepDeletedCells.TRUE
- || scanType.equals(ScanType.COMPACT_RETAIN_DELETES)) ?
- KeepDeletedCells.TRUE : KeepDeletedCells.TTL;
- }
-
- /*
- * if the user set a TTL we should leave MIN_VERSIONS at the default (0 in most of the cases).
- * Otherwise the data (1st version) will not be removed after the TTL. If no TTL, we want
- * Math.max(maxVersions, minVersions, 1)
- */
- public int getMinVersions(ScanOptions options, ColumnFamilyDescriptor cfDescriptor) {
- return cfDescriptor.getTimeToLive() != HConstants.FOREVER ? options.getMinVersions()
- : Math.max(Math.max(options.getMinVersions(),
- cfDescriptor.getMaxVersions()),1);
- }
-
- /**
- *
- * @param conf HBase Configuration
- * @param columnDescriptor ColumnFamilyDescriptor for the store being compacted
- * @param options ScanOptions of overrides to the compaction scan
- * @return Time to live in milliseconds, based on both HBase TTL and Phoenix max lookback age
- */
- public long getTimeToLiveForCompactions(Configuration conf,
- ColumnFamilyDescriptor columnDescriptor,
- ScanOptions options) {
- long ttlConfigured = columnDescriptor.getTimeToLive();
- long ttlInMillis = ttlConfigured * 1000;
- long maxLookbackTtl = getMaxLookbackInMillis(conf);
- if (isMaxLookbackTimeEnabled(maxLookbackTtl)) {
- if (ttlConfigured == HConstants.FOREVER
- && columnDescriptor.getKeepDeletedCells() != KeepDeletedCells.TRUE) {
- // If user configured default TTL(FOREVER) and keep deleted cells to false or
- // TTL then to remove unwanted delete markers we should change ttl to max lookback age
- ttlInMillis = maxLookbackTtl;
- } else {
- //if there is a TTL, use TTL instead of max lookback age.
- // Max lookback age should be more recent or equal to TTL
- ttlInMillis = Math.max(ttlInMillis, maxLookbackTtl);
- }
- }
-
- return ttlInMillis;
- }
-
- public void setScanOptionsForFlushesAndCompactions(Configuration conf,
- ScanOptions options,
- final Store store,
- ScanType type) {
- ColumnFamilyDescriptor cfDescriptor = store.getColumnFamilyDescriptor();
- options.setTTL(getTimeToLiveForCompactions(conf, cfDescriptor,
- options));
- options.setKeepDeletedCells(getKeepDeletedCells(options, type));
- options.setMaxVersions(Integer.MAX_VALUE);
- options.setMinVersions(getMinVersions(options, cfDescriptor));
- }
-
- public static long getMaxLookbackInMillis(Configuration conf){
- //config param is in seconds, switch to millis
- return conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000;
- }
-
- public static boolean isMaxLookbackTimeEnabled(Configuration conf){
- return isMaxLookbackTimeEnabled(conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE));
- }
-
- public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){
- return maxLookbackTime > 0L;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
deleted file mode 100644
index f887ed3..0000000
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.wal.WALKey;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class CompatIndexRegionObserver implements RegionObserver {
-
- public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) {
- key.addExtendedAttribute(attrKey, attrValue);
- }
-
- public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) {
- return key.getExtendedAttribute(attrKey);
- }
-
- public static Map<String, byte[]> getAttributeValuesFromWALKey(WALKey key) {
- return new HashMap<String, byte[]>(key.getExtendedAttributes());
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
deleted file mode 100644
index 0c7dfd8..0000000
--- a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.test;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class DelegateCell implements Cell {
- private final Cell delegate;
- private final String name;
- public DelegateCell(Cell delegate, String name) {
- this.delegate = delegate;
- this.name = name;
- }
-
- @Override
- public int getValueOffset() {
- return delegate.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return delegate.getValueLength();
- }
-
- @Override
- public byte[] getValueArray() {
- return delegate.getValueArray();
- }
-
- @Override
- public byte getTypeByte() {
- return delegate.getTypeByte();
- }
-
- @Override
- public long getTimestamp() {
- return delegate.getTimestamp();
- }
-
- @Override
- public int getTagsOffset() {
- return delegate.getTagsOffset();
- }
-
- @Override
- public byte[] getTagsArray() {
- return delegate.getTagsArray();
- }
-
- @Override
- public int getRowOffset() {
- return delegate.getRowOffset();
- }
-
- @Override
- public short getRowLength() {
- return delegate.getRowLength();
- }
-
- @Override
- public byte[] getRowArray() {
- return delegate.getRowArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return delegate.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return delegate.getQualifierLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return delegate.getQualifierArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return delegate.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return delegate.getFamilyLength();
- }
-
- @Override
- public byte[] getFamilyArray() {
- return delegate.getFamilyArray();
- }
-
- @Override
- public String toString() {
- return name;
- }
-
- @Override
- public long getSequenceId() {
- return delegate.getSequenceId();
- }
-
- @Override
- public int getTagsLength() {
- return delegate.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return delegate.getType();
- }
-
- @Override
- public long heapSize() {
- return delegate.heapSize();
- }
-
- @Override
- public int getSerializedSize() {
- return delegate.getSerializedSize();
- }
-}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
index a255732..b6e360c 100644
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
+++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
@@ -19,7 +19,6 @@
import java.io.IOException;
-import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
@@ -33,11 +32,6 @@
}
@Override
- public RegionLocator getRegionLocator() throws IOException {
- return delegate.getRegionLocator();
- }
-
- @Override
public Result mutateRow(RowMutations rm) throws IOException {
return delegate.mutateRow(rm);
}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
index ae7992f..bddc7a5 100644
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
+++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
@@ -19,7 +19,6 @@
import java.io.IOException;
-import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
@@ -27,11 +26,6 @@
public abstract class CompatOmidTransactionTable implements Table {
@Override
- public RegionLocator getRegionLocator() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
public Result mutateRow(RowMutations rm) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
deleted file mode 100644
index 80a99b3..0000000
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPermissionUtil.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission;
-import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-
-public class CompatPermissionUtil {
-
- private CompatPermissionUtil() {
- //Not to be instantiated
- }
-
- public static AccessChecker newAccessChecker(final Configuration conf, ZKWatcher zk) {
- //Ignore ZK parameter
- return new AccessChecker(conf);
- }
-
- public static void stopAccessChecker(AccessChecker accessChecker) throws IOException {
- //NOOP
- }
-
- public static String getUserFromUP(UserPermission userPermission) {
- return userPermission.getUser();
- }
-
- public static Permission getPermissionFromUP(UserPermission userPermission) {
- return userPermission.getPermission();
- }
-
- public static boolean authorizeUserTable(AccessChecker accessChecker, User user,
- TableName table, Permission.Action action) {
- // This also checks for group access
- return accessChecker.getAuthManager().authorizeUserTable(user, table, action);
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
index b749a25..4ed9c91 100644
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
+++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
@@ -25,30 +25,4 @@
*/
public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
protected RpcScheduler delegate;
-
- @Override
- public int getMetaPriorityQueueLength() {
- return this.delegate.getMetaPriorityQueueLength();
- }
-
- @Override
- public int getActiveGeneralRpcHandlerCount() {
- return this.delegate.getActiveGeneralRpcHandlerCount();
- }
-
- @Override
- public int getActivePriorityRpcHandlerCount() {
- return this.delegate.getActivePriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveMetaPriorityRpcHandlerCount() {
- return this.delegate.getActiveMetaPriorityRpcHandlerCount();
- }
-
- @Override
- public int getActiveReplicationRpcHandlerCount() {
- return this.delegate.getActiveReplicationRpcHandlerCount();
- }
-
}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
deleted file mode 100644
index 03aa257..0000000
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatStoreFileReader.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.HFileInfo;
-import org.apache.hadoop.hbase.io.hfile.ReaderContext;
-import org.apache.hadoop.hbase.io.hfile.ReaderContext.ReaderType;
-import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-
-public class CompatStoreFileReader extends StoreFileReader {
-
- public CompatStoreFileReader(final FileSystem fs, final Path p,
- final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf,
- boolean primaryReplicaStoreFile, AtomicInteger refCount, final Configuration conf)
- throws IOException {
- super(new ReaderContext(p, in, size, new HFileSystem(fs), primaryReplicaStoreFile,
- ReaderType.STREAM),
- new HFileInfo(new ReaderContext(p, in, size, new HFileSystem(fs),
- primaryReplicaStoreFile, ReaderType.STREAM), conf),
- cacheConf, refCount, conf);
- getHFileReader().getHFileInfo().initMetaAndIndex(getHFileReader());
- }
-
-}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 820a728..d52608c 100644
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -55,15 +55,6 @@
//Not to be instantiated
}
- public static int getCellSerializedSize(Cell cell) {
- return cell.getSerializedSize();
- }
-
- public static ListMultimap<String, ? extends Permission> readPermissions(
- byte[] data, Configuration conf) throws DeserializationException {
- return PermissionStorage.readPermissions(data, conf);
- }
-
public static HFileContext createHFileContext(Configuration conf, Algorithm compression,
Integer blockSize, DataBlockEncoding encoding, CellComparator comparator) {
@@ -76,63 +67,10 @@
.build();
}
- public static HFileContextBuilder withComparator(HFileContextBuilder contextBuilder,
- CellComparatorImpl cellComparator) {
- return contextBuilder.withCellComparator(cellComparator);
- }
-
- public static StoreFileWriter.Builder withComparator(StoreFileWriter.Builder builder,
- CellComparatorImpl cellComparator) {
- return builder;
- }
-
public static Scan getScanForTableName(Connection conn, TableName tableName) {
return MetaTableAccessor.getScanForTableName(conn.getConfiguration(), tableName);
}
-
- /**
- * HBase 2.3+ has storeRefCount available in RegionMetrics
- *
- * @param admin Admin instance
- * @return true if any region has refCount leakage
- * @throws IOException if something went wrong while connecting to Admin
- */
- public synchronized static boolean isAnyStoreRefCountLeaked(Admin admin)
- throws IOException {
- int retries = 5;
- while (retries > 0) {
- boolean isStoreRefCountLeaked = isStoreRefCountLeaked(admin);
- if (!isStoreRefCountLeaked) {
- return false;
- }
- retries--;
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- LOGGER.error("Interrupted while sleeping", e);
- break;
- }
- }
- return true;
- }
-
- private static boolean isStoreRefCountLeaked(Admin admin)
- throws IOException {
- for (ServerName serverName : admin.getRegionServers()) {
- for (RegionMetrics regionMetrics : admin.getRegionMetrics(serverName)) {
- int regionTotalRefCount = regionMetrics.getStoreRefCount();
- if (regionTotalRefCount > 0) {
- LOGGER.error("Region {} has refCount leak. Total refCount"
- + " of all storeFiles combined for the region: {}",
- regionMetrics.getNameAsString(), regionTotalRefCount);
- return true;
- }
- }
- }
- return false;
- }
-
public static ChecksumType getChecksumType(Configuration conf) {
return StoreUtils.getChecksumType(conf);
}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
index 80aafe6..bf4c3c9 100644
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
+++ b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
@@ -20,20 +20,7 @@
public class HbaseCompatCapabilities {
- public static boolean isMaxLookbackTimeSupported() {
- return true;
- }
-
- //In HBase 2.1 and 2.2, a lookback query won't return any results if covered by a future delete,
- //but in 2.3 and later we have the preSoreScannerOpen hook that overrides that behavior
- public static boolean isLookbackBeyondDeletesSupported() { return true; }
-
- //HBase 2.1 does not have HBASE-22710, which is necessary for raw scan skip scan and
- // AllVersionsIndexRebuild filters to
- // show all versions properly. HBase 2.2.5+ and HBase 2.3.0+ have this fix.
- public static boolean isRawFilterSupported() { return true; }
-
- //HBase 2.3+ has preWALAppend() on RegionObserver (HBASE-22623)
- public static boolean hasPreWALAppend() { return true; }
+ // Currently each supported HBase version has the same capabilities, so there is
+ // nothing in here.
}
\ No newline at end of file
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
deleted file mode 100644
index c5485a5..0000000
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/OffsetCell.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class OffsetCell implements Cell {
-
- private Cell cell;
- private int offset;
-
- public OffsetCell(Cell cell, int offset) {
- this.cell = cell;
- this.offset = offset;
- }
-
- @Override
- public byte[] getRowArray() {
- return cell.getRowArray();
- }
-
- @Override
- public int getRowOffset() {
- return cell.getRowOffset() + offset;
- }
-
- @Override
- public short getRowLength() {
- return (short) (cell.getRowLength() - offset);
- }
-
- @Override
- public byte[] getFamilyArray() {
- return cell.getFamilyArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return cell.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return cell.getFamilyLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return cell.getQualifierArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return cell.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return cell.getQualifierLength();
- }
-
- @Override
- public long getTimestamp() {
- return cell.getTimestamp();
- }
-
- @Override
- public byte getTypeByte() {
- return cell.getTypeByte();
- }
-
- @Override public long getSequenceId() {
- return cell.getSequenceId();
- }
-
- @Override
- public byte[] getValueArray() {
- return cell.getValueArray();
- }
-
- @Override
- public int getValueOffset() {
- return cell.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return cell.getValueLength();
- }
-
- @Override
- public byte[] getTagsArray() {
- return cell.getTagsArray();
- }
-
- @Override
- public int getTagsOffset() {
- return cell.getTagsOffset();
- }
-
- @Override
- public int getTagsLength() {
- return cell.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return cell.getType();
- }
-
- @Override
- public long heapSize() {
- return cell.heapSize();
- }
-
- @Override
- public int getSerializedSize() {
- return cell.getSerializedSize() - offset;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
deleted file mode 100644
index cd1a7f5..0000000
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatBaseScannerRegionObserver.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeepDeletedCells;
-import org.apache.hadoop.hbase.MemoryCompactionPolicy;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.ScanOptions;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import java.io.IOException;
-
-public class CompatBaseScannerRegionObserver implements RegionObserver {
-
- public static final String PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY =
- "phoenix.max.lookback.age.seconds";
- public static final int DEFAULT_PHOENIX_MAX_LOOKBACK_AGE = 0;
-
- public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
- CompactionRequest request) throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
- }
- }
-
- public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
- ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- setScanOptionsForFlushesAndCompactions(conf, options, store, ScanType.COMPACT_RETAIN_DELETES);
- }
- }
-
- public void preMemStoreCompactionCompactScannerOpen(
- ObserverContext<RegionCoprocessorEnvironment> c, Store store, ScanOptions options)
- throws IOException {
- Configuration conf = c.getEnvironment().getConfiguration();
- if (isMaxLookbackTimeEnabled(conf)) {
- MemoryCompactionPolicy inMemPolicy =
- store.getColumnFamilyDescriptor().getInMemoryCompaction();
- ScanType scanType;
- //the eager and adaptive in-memory compaction policies can purge versions; the others
- // can't. (Eager always does; adaptive sometimes does)
- if (inMemPolicy.equals(MemoryCompactionPolicy.EAGER) ||
- inMemPolicy.equals(MemoryCompactionPolicy.ADAPTIVE)) {
- scanType = ScanType.COMPACT_DROP_DELETES;
- } else {
- scanType = ScanType.COMPACT_RETAIN_DELETES;
- }
- setScanOptionsForFlushesAndCompactions(conf, options, store, scanType);
- }
- }
-
- public void preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, Store store,
- ScanOptions options) throws IOException {
-
- if (!storeFileScanDoesntNeedAlteration(options)) {
- //PHOENIX-4277 -- When doing a point-in-time (SCN) Scan, HBase by default will hide
- // mutations that happen before a delete marker. This overrides that behavior.
- options.setMinVersions(options.getMinVersions());
- KeepDeletedCells keepDeletedCells = KeepDeletedCells.TRUE;
- if (store.getColumnFamilyDescriptor().getTimeToLive() != HConstants.FOREVER) {
- keepDeletedCells = KeepDeletedCells.TTL;
- }
- options.setKeepDeletedCells(keepDeletedCells);
- }
- }
-
- private boolean storeFileScanDoesntNeedAlteration(ScanOptions options) {
- Scan scan = options.getScan();
- boolean isRaw = scan.isRaw();
- //true if keep deleted cells is either TRUE or TTL
- boolean keepDeletedCells = options.getKeepDeletedCells().equals(KeepDeletedCells.TRUE) ||
- options.getKeepDeletedCells().equals(KeepDeletedCells.TTL);
- boolean timeRangeIsLatest = scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP;
- boolean timestampIsTransactional =
- isTransactionalTimestamp(scan.getTimeRange().getMax());
- return isRaw
- || keepDeletedCells
- || timeRangeIsLatest
- || timestampIsTransactional;
- }
-
- private boolean isTransactionalTimestamp(long ts) {
- //have to use the HBase edge manager because the Phoenix one is in phoenix-core
- return ts > (long) (EnvironmentEdgeManager.currentTime() * 1.1);
- }
-
- /*
- * If KeepDeletedCells.FALSE, KeepDeletedCells.TTL ,
- * let delete markers age once lookback age is done.
- */
- public KeepDeletedCells getKeepDeletedCells(ScanOptions options, ScanType scanType) {
- //if we're doing a minor compaction or flush, always set keep deleted cells
- //to true. Otherwise, if keep deleted cells is false or TTL, use KeepDeletedCells TTL,
- //where the value of the ttl might be overriden to the max lookback age elsewhere
- return (options.getKeepDeletedCells() == KeepDeletedCells.TRUE
- || scanType.equals(ScanType.COMPACT_RETAIN_DELETES)) ?
- KeepDeletedCells.TRUE : KeepDeletedCells.TTL;
- }
-
- /*
- * if the user set a TTL we should leave MIN_VERSIONS at the default (0 in most of the cases).
- * Otherwise the data (1st version) will not be removed after the TTL. If no TTL, we want
- * Math.max(maxVersions, minVersions, 1)
- */
- public int getMinVersions(ScanOptions options, ColumnFamilyDescriptor cfDescriptor) {
- return cfDescriptor.getTimeToLive() != HConstants.FOREVER ? options.getMinVersions()
- : Math.max(Math.max(options.getMinVersions(),
- cfDescriptor.getMaxVersions()),1);
- }
-
- /**
- *
- * @param conf HBase Configuration
- * @param columnDescriptor ColumnFamilyDescriptor for the store being compacted
- * @param options ScanOptions of overrides to the compaction scan
- * @return Time to live in milliseconds, based on both HBase TTL and Phoenix max lookback age
- */
- public long getTimeToLiveForCompactions(Configuration conf,
- ColumnFamilyDescriptor columnDescriptor,
- ScanOptions options) {
- long ttlConfigured = columnDescriptor.getTimeToLive();
- long ttlInMillis = ttlConfigured * 1000;
- long maxLookbackTtl = getMaxLookbackInMillis(conf);
- if (isMaxLookbackTimeEnabled(maxLookbackTtl)) {
- if (ttlConfigured == HConstants.FOREVER
- && columnDescriptor.getKeepDeletedCells() != KeepDeletedCells.TRUE) {
- // If user configured default TTL(FOREVER) and keep deleted cells to false or
- // TTL then to remove unwanted delete markers we should change ttl to max lookback age
- ttlInMillis = maxLookbackTtl;
- } else {
- //if there is a TTL, use TTL instead of max lookback age.
- // Max lookback age should be more recent or equal to TTL
- ttlInMillis = Math.max(ttlInMillis, maxLookbackTtl);
- }
- }
-
- return ttlInMillis;
- }
-
- public void setScanOptionsForFlushesAndCompactions(Configuration conf,
- ScanOptions options,
- final Store store,
- ScanType type) {
- ColumnFamilyDescriptor cfDescriptor = store.getColumnFamilyDescriptor();
- options.setTTL(getTimeToLiveForCompactions(conf, cfDescriptor,
- options));
- options.setKeepDeletedCells(getKeepDeletedCells(options, type));
- options.setMaxVersions(Integer.MAX_VALUE);
- options.setMinVersions(getMinVersions(options, cfDescriptor));
- }
-
- public static long getMaxLookbackInMillis(Configuration conf){
- //config param is in seconds, switch to millis
- return conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE) * 1000;
- }
-
- public static boolean isMaxLookbackTimeEnabled(Configuration conf){
- return isMaxLookbackTimeEnabled(conf.getLong(PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
- DEFAULT_PHOENIX_MAX_LOOKBACK_AGE));
- }
-
- public static boolean isMaxLookbackTimeEnabled(long maxLookbackTime){
- return maxLookbackTime > 0L;
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
deleted file mode 100644
index f887ed3..0000000
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/coprocessor/CompatIndexRegionObserver.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.coprocessor;
-
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.wal.WALKey;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class CompatIndexRegionObserver implements RegionObserver {
-
- public static void appendToWALKey(WALKey key, String attrKey, byte[] attrValue) {
- key.addExtendedAttribute(attrKey, attrValue);
- }
-
- public static byte[] getAttributeValueFromWALKey(WALKey key, String attrKey) {
- return key.getExtendedAttribute(attrKey);
- }
-
- public static Map<String, byte[]> getAttributeValuesFromWALKey(WALKey key) {
- return new HashMap<String, byte[]>(key.getExtendedAttributes());
- }
-
-}
diff --git a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
deleted file mode 100644
index 0c7dfd8..0000000
--- a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/test/DelegateCell.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.compat.hbase.test;
-
-import org.apache.hadoop.hbase.Cell;
-
-public class DelegateCell implements Cell {
- private final Cell delegate;
- private final String name;
- public DelegateCell(Cell delegate, String name) {
- this.delegate = delegate;
- this.name = name;
- }
-
- @Override
- public int getValueOffset() {
- return delegate.getValueOffset();
- }
-
- @Override
- public int getValueLength() {
- return delegate.getValueLength();
- }
-
- @Override
- public byte[] getValueArray() {
- return delegate.getValueArray();
- }
-
- @Override
- public byte getTypeByte() {
- return delegate.getTypeByte();
- }
-
- @Override
- public long getTimestamp() {
- return delegate.getTimestamp();
- }
-
- @Override
- public int getTagsOffset() {
- return delegate.getTagsOffset();
- }
-
- @Override
- public byte[] getTagsArray() {
- return delegate.getTagsArray();
- }
-
- @Override
- public int getRowOffset() {
- return delegate.getRowOffset();
- }
-
- @Override
- public short getRowLength() {
- return delegate.getRowLength();
- }
-
- @Override
- public byte[] getRowArray() {
- return delegate.getRowArray();
- }
-
- @Override
- public int getQualifierOffset() {
- return delegate.getQualifierOffset();
- }
-
- @Override
- public int getQualifierLength() {
- return delegate.getQualifierLength();
- }
-
- @Override
- public byte[] getQualifierArray() {
- return delegate.getQualifierArray();
- }
-
- @Override
- public int getFamilyOffset() {
- return delegate.getFamilyOffset();
- }
-
- @Override
- public byte getFamilyLength() {
- return delegate.getFamilyLength();
- }
-
- @Override
- public byte[] getFamilyArray() {
- return delegate.getFamilyArray();
- }
-
- @Override
- public String toString() {
- return name;
- }
-
- @Override
- public long getSequenceId() {
- return delegate.getSequenceId();
- }
-
- @Override
- public int getTagsLength() {
- return delegate.getTagsLength();
- }
-
- @Override
- public Type getType() {
- return delegate.getType();
- }
-
- @Override
- public long heapSize() {
- return delegate.heapSize();
- }
-
- @Override
- public int getSerializedSize() {
- return delegate.getSerializedSize();
- }
-}
diff --git a/pom.xml b/pom.xml
index 35fc769..4ed9524 100644
--- a/pom.xml
+++ b/pom.xml
@@ -43,8 +43,6 @@
<module>phoenix-hbase-compat-2.4.1</module>
<module>phoenix-hbase-compat-2.4.0</module>
<module>phoenix-hbase-compat-2.3.0</module>
- <module>phoenix-hbase-compat-2.2.5</module>
- <module>phoenix-hbase-compat-2.1.6</module>
<module>phoenix-core</module>
<module>phoenix-pherf</module>
<module>phoenix-tracing-webapp</module>
@@ -82,10 +80,8 @@
<hbase.suffix>hbase-${hbase.profile}</hbase.suffix>
<!-- This is used by the release script only -->
- <hbase.profile.list>2.1 2.2 2.3 2.4.0 2.4</hbase.profile.list>
+ <hbase.profile.list>2.3 2.4.0 2.4</hbase.profile.list>
<!-- The default hbase versions to build with (override with hbase.version) -->
- <hbase-2.1.runtime.version>2.1.10</hbase-2.1.runtime.version>
- <hbase-2.2.runtime.version>2.2.7</hbase-2.2.runtime.version>
<hbase-2.3.runtime.version>2.3.7</hbase-2.3.runtime.version>
<hbase-2.4.0.runtime.version>2.4.0</hbase-2.4.0.runtime.version>
<hbase-2.4.runtime.version>2.4.11</hbase-2.4.runtime.version>
@@ -1541,42 +1537,6 @@
</properties>
</profile>
<profile>
- <!-- PHOENIX-5993 may work with the public HBase artifacts, as the test don't trip over
- anything but it should be rebuilt like 2.2+ -->
- <id>phoenix-hbase-compat-2.1.6</id>
- <activation>
- <activeByDefault>true</activeByDefault>
- <property>
- <name>hbase.profile</name>
- <value>2.1</value>
- </property>
- </activation>
- <properties>
- <hbase.profile>2.1</hbase.profile>
- <hbase.compat.version>2.1.6</hbase.compat.version>
- <hbase.version>${hbase-2.1.runtime.version}</hbase.version>
- <hadoop.version>3.0.3</hadoop.version>
- <tephra.hbase.compat.version>2.1</tephra.hbase.compat.version>
- </properties>
- </profile>
- <profile>
- <!-- PHOENIX-5993 This won't work with the public HBase artifacts -->
- <id>phoenix-hbase-compat-2.2.5</id>
- <activation>
- <property>
- <name>hbase.profile</name>
- <value>2.2</value>
- </property>
- </activation>
- <properties>
- <hbase.profile>2.2</hbase.profile>
- <hbase.compat.version>2.2.5</hbase.compat.version>
- <hbase.version>${hbase-2.2.runtime.version}</hbase.version>
- <hadoop.version>3.1.3</hadoop.version>
- <tephra.hbase.compat.version>2.2</tephra.hbase.compat.version>
- </properties>
- </profile>
- <profile>
<!-- PHOENIX-5993 This won't work with the public HBase artifacts -->
<id>phoenix-hbase-compat-2.3.0</id>
<activation>