HBASE-28811 Use region server configuration for evicting the cache while unassigning a region (#6197)

Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>
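In short: the block cache eviction decision for a closing region now follows the configuration of the region server that owns the region, not the master that schedules the close. Below is a minimal sketch of the two operator-facing knobs involved, assuming only the CacheConfig key constants this patch imports; the class name and example values are illustrative, not recommendations.

import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerEvictionConfigExample {
  public static void main(String[] args) {
    // These settings are now honoured on the region server that performs the close, not on the
    // active master, when deciding whether cached blocks should be dropped.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, false); // ordinary close: keep cached blocks
    conf.setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, true);  // close for a split/merge: drop parent blocks
    System.out.println(EVICT_BLOCKS_ON_CLOSE_KEY + " = "
      + conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, false));
    System.out.println(EVICT_BLOCKS_ON_SPLIT_KEY + " = "
      + conf.getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, true));
  }
}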
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java
index 9a38952..344d7e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.java
@@ -45,17 +45,17 @@
   // wrong (but do not make it wrong intentionally). The client can handle this error.
   private ServerName assignCandidate;
 
-  private boolean evictCache;
+  private boolean isSplit;
 
   public CloseRegionProcedure() {
     super();
   }
 
   public CloseRegionProcedure(TransitRegionStateProcedure parent, RegionInfo region,
-    ServerName targetServer, ServerName assignCandidate, boolean evictCache) {
+    ServerName targetServer, ServerName assignCandidate, boolean isSplit) {
     super(parent, region, targetServer);
     this.assignCandidate = assignCandidate;
-    this.evictCache = evictCache;
+    this.isSplit = isSplit;
   }
 
   @Override
@@ -65,7 +65,7 @@
 
   @Override
   public RemoteOperation newRemoteOperation(MasterProcedureEnv env) {
-    return new RegionCloseOperation(this, region, getProcId(), assignCandidate, evictCache,
+    return new RegionCloseOperation(this, region, getProcId(), assignCandidate, isSplit,
       env.getMasterServices().getMasterActiveTime());
   }
 
@@ -76,7 +76,7 @@
     if (assignCandidate != null) {
       builder.setAssignCandidate(ProtobufUtil.toServerName(assignCandidate));
     }
-    builder.setEvictCache(evictCache);
+    builder.setEvictCache(isSplit);
     serializer.serialize(builder.build());
   }
 
@@ -88,7 +88,7 @@
     if (data.hasAssignCandidate()) {
       assignCandidate = ProtobufUtil.toServerName(data.getAssignCandidate());
     }
-    evictCache = data.getEvictCache();
+    isSplit = data.getEvictCache();
   }
 
   @Override
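The rename above keeps the flag travelling in the procedure's existing EvictCache protobuf field, so no schema change is needed; only its meaning changes, from "evict on close" to "this close is part of a split/merge". A round-trip sketch of that serialization follows, where the generated class name MasterProcedureProtos.CloseRegionProcedureStateData is an assumption inferred from the setEvictCache/getEvictCache accessors visible in the hunk.

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloseRegionProcedureStateData;

public class CloseStateRoundTripSketch {
  public static void main(String[] args) throws Exception {
    boolean isSplit = true;
    // The field name stays evict_cache for compatibility; it now carries the split/merge flag.
    CloseRegionProcedureStateData data =
      CloseRegionProcedureStateData.newBuilder().setEvictCache(isSplit).build();
    CloseRegionProcedureStateData parsed =
      CloseRegionProcedureStateData.parseFrom(data.toByteArray());
    System.out.println("isSplit after round trip: " + parsed.getEvictCache());
  }
}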
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
index 0ed740c..e195296 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
@@ -372,14 +372,14 @@
   }
 
   private void closeRegionAfterUpdatingMeta(MasterProcedureEnv env, RegionStateNode regionNode) {
-    CloseRegionProcedure closeProc =
-      isSplit
-        ? new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(),
-          assignCandidate,
-          env.getMasterConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY,
-            DEFAULT_EVICT_ON_SPLIT))
-        : new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(),
-          assignCandidate, evictCache);
+    LOG.debug("Close region: isSplit: {}: evictOnSplit: {}: evictOnClose: {}", isSplit,
+      env.getMasterConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT),
+      evictCache);
+    // Splits/Merges are special cases: rather than deciding on the cache eviction behaviour here
+    // at the Master, we just flag that this close is for a split/merge and let the RSes decide on
+    // the eviction. See HBASE-28811 for more context.
+    CloseRegionProcedure closeProc = new CloseRegionProcedure(this, getRegion(),
+      regionNode.getRegionLocation(), assignCandidate, isSplit);
     addChildProcedure(closeProc);
     setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_CLOSED);
   }
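The master-side hunk above stops consulting env.getMasterConfiguration() for the split case. A small, hypothetical illustration of why: the master's configuration can disagree with that of the server which actually holds the cached blocks, so a decision made at the master may be wrong for the closing server. Only the constants from the patch are assumed; the class and values are made up for the example.

import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_SPLIT;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;

import org.apache.hadoop.conf.Configuration;

public class EvictDecisionMismatchExample {
  public static void main(String[] args) {
    Configuration masterConf = new Configuration();
    masterConf.setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, true);

    Configuration regionServerConf = new Configuration();
    regionServerConf.setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, false);

    // Before: the value shipped in CloseRegionProcedure came from the master's configuration.
    boolean decidedAtMaster =
      masterConf.getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT);
    // After: only "this close is for a split/merge" is shipped, and the owning region server
    // applies its own setting when it actually closes the region.
    boolean decidedAtRegionServer =
      regionServerConf.getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT);
    System.out.println("master would evict: " + decidedAtMaster
      + ", region server will evict: " + decidedAtRegionServer);
  }
}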
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java
index 8459b0b..815d8be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/UnassignRegionHandler.java
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hbase.regionserver.handler;
 
+import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_CLOSE;
+import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_SPLIT;
+import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
+import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;
+
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
@@ -61,21 +66,21 @@
 
   private final RetryCounter retryCounter;
 
-  private boolean evictCache;
+  private boolean isSplit;
 
   // active time of the master that sent this unassign request, used for fencing
   private final long initiatingMasterActiveTime;
 
   public UnassignRegionHandler(HRegionServer server, String encodedName, long closeProcId,
     boolean abort, @Nullable ServerName destination, EventType eventType,
-    long initiatingMasterActiveTime, boolean evictCache) {
+    long initiatingMasterActiveTime, boolean isSplit) {
     super(server, eventType);
     this.encodedName = encodedName;
     this.closeProcId = closeProcId;
     this.abort = abort;
     this.destination = destination;
     this.retryCounter = HandlerUtil.getRetryCounter();
-    this.evictCache = evictCache;
+    this.isSplit = isSplit;
     this.initiatingMasterActiveTime = initiatingMasterActiveTime;
   }
 
@@ -125,7 +130,11 @@
     }
     // This should be true only in the case of splits/merges closing the parent regions, as
     // there's no point in keeping blocks for those region files.
-    region.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(evictCache));
+    final boolean evictCacheOnClose = isSplit
+      ? server.getConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT)
+      : server.getConfiguration().getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE);
+    LOG.debug("Unassign region: split region: {}: evictCache: {}", isSplit, evictCacheOnClose);
+    region.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(evictCacheOnClose));
 
     if (region.close(abort) == null) {
       // XXX: Is this still possible? The old comment says about split, but now split is done at
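The handler hunk above, condensed into one standalone helper: pick the setting from the closing server's configuration according to whether the close is for a split/merge, then push it to every store before the region closes. A minimal sketch assuming only calls that appear in the patch (HRegion#getStores, HStore#getCacheConfig, CacheConfig#setEvictOnClose); the helper name applyEvictOnClose is hypothetical.

import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_CLOSE;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_SPLIT;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.HRegion;

final class EvictOnCloseApplier {
  private EvictOnCloseApplier() {
  }

  /** Apply the locally configured eviction behaviour to every store of the closing region. */
  static void applyEvictOnClose(HRegion region, Configuration regionServerConf, boolean isSplit) {
    boolean evict = isSplit
      ? regionServerConf.getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT)
      : regionServerConf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE);
    region.getStores().forEach(store -> store.getCacheConfig().setEvictOnClose(evict));
  }
}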
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
similarity index 62%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java
rename to hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
index c308d5f..34c8f07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
@@ -20,10 +20,12 @@
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.hfile.CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY;
+import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
 import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;
 import static org.apache.hadoop.hbase.io.hfile.CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -47,13 +49,13 @@
 import org.slf4j.LoggerFactory;
 
 @Category({ MiscTests.class, MediumTests.class })
-public class TestSplitWithCache {
+public class TestCacheEviction {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-    HBaseClassTestRule.forClass(TestSplitWithCache.class);
+    HBaseClassTestRule.forClass(TestCacheEviction.class);
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestSplitWithCache.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestCacheEviction.class);
 
   private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
 
@@ -69,42 +71,44 @@
 
   @Test
   public void testEvictOnSplit() throws Exception {
-    doTest("testEvictOnSplit", true,
+    doTestEvictOnSplit("testEvictOnSplit", true,
       (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
       (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null));
   }
 
   @Test
   public void testDoesntEvictOnSplit() throws Exception {
-    doTest("testDoesntEvictOnSplit", false,
+    doTestEvictOnSplit("testDoesntEvictOnSplit", false,
       (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
       (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null));
   }
 
-  private void doTest(String table, boolean evictOnSplit,
+  @Test
+  public void testEvictOnClose() throws Exception {
+    doTestEvictOnClose("testEvictOnClose", true,
+      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
+      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null));
+  }
+
+  @Test
+  public void testDoesntEvictOnClose() throws Exception {
+    doTestEvictOnClose("testDoesntEvictOnClose", false,
+      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
+      (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null));
+  }
+
+  private void doTestEvictOnSplit(String table, boolean evictOnSplit,
     BiConsumer<String, Map<String, Pair<String, Long>>> predicateBeforeSplit,
     BiConsumer<String, Map<String, Pair<String, Long>>> predicateAfterSplit) throws Exception {
-    UTIL.getConfiguration().setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit);
     UTIL.startMiniCluster(1);
     try {
       TableName tableName = TableName.valueOf(table);
-      byte[] family = Bytes.toBytes("CF");
-      TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
-        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
-      UTIL.getAdmin().createTable(td);
-      UTIL.waitTableAvailable(tableName);
-      Table tbl = UTIL.getConnection().getTable(tableName);
-      List<Put> puts = new ArrayList<>();
-      for (int i = 0; i < 1000; i++) {
-        Put p = new Put(Bytes.toBytes("row-" + i));
-        p.addColumn(family, Bytes.toBytes(1), Bytes.toBytes("val-" + i));
-        puts.add(p);
-      }
-      tbl.put(puts);
-      UTIL.getAdmin().flush(tableName);
+      createAndCacheTable(tableName);
       Collection<HStoreFile> files =
         UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
       checkCacheForBlocks(tableName, files, predicateBeforeSplit);
+      UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
+        .setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit);
       UTIL.getAdmin().split(tableName, Bytes.toBytes("row-500"));
       Waiter.waitFor(UTIL.getConfiguration(), 30000,
         () -> UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2);
@@ -113,7 +117,43 @@
     } finally {
       UTIL.shutdownMiniCluster();
     }
+  }
 
+  private void doTestEvictOnClose(String table, boolean evictOnClose,
+    BiConsumer<String, Map<String, Pair<String, Long>>> predicateBeforeClose,
+    BiConsumer<String, Map<String, Pair<String, Long>>> predicateAfterClose) throws Exception {
+    UTIL.startMiniCluster(1);
+    try {
+      TableName tableName = TableName.valueOf(table);
+      createAndCacheTable(tableName);
+      Collection<HStoreFile> files =
+        UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
+      checkCacheForBlocks(tableName, files, predicateBeforeClose);
+      UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
+        .setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, evictOnClose);
+      UTIL.getAdmin().disableTable(tableName);
+      UTIL.waitUntilNoRegionsInTransition();
+      checkCacheForBlocks(tableName, files, predicateAfterClose);
+    } finally {
+      UTIL.shutdownMiniCluster();
+    }
+  }
+
+  private void createAndCacheTable(TableName tableName) throws IOException, InterruptedException {
+    byte[] family = Bytes.toBytes("CF");
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
+    UTIL.getAdmin().createTable(td);
+    UTIL.waitTableAvailable(tableName);
+    Table tbl = UTIL.getConnection().getTable(tableName);
+    List<Put> puts = new ArrayList<>();
+    for (int i = 0; i < 1000; i++) {
+      Put p = new Put(Bytes.toBytes("row-" + i));
+      p.addColumn(family, Bytes.toBytes(1), Bytes.toBytes("val-" + i));
+      puts.add(p);
+    }
+    tbl.put(puts);
+    UTIL.getAdmin().flush(tableName);
   }
 
   private void checkCacheForBlocks(TableName tableName, Collection<HStoreFile> files,
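A usage sketch in the spirit of the renamed test, assuming an already running HBaseTestingUtil mini cluster with a cached table; every call reused here already appears in the test class above, only the wrapper method is hypothetical. It highlights that the setting is read from the server's live Configuration at close time, which is why the tests can flip it without a restart.

import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public final class EvictOnCloseUsageSketch {
  private EvictOnCloseUsageSketch() {
  }

  /**
   * Flip the first region server's evict-on-close setting, then disable the table so its regions
   * close under that setting; no server restart is needed because UnassignRegionHandler consults
   * the server Configuration when the close actually runs.
   */
  static void disableWithEviction(HBaseTestingUtil util, TableName tableName, boolean evict)
    throws Exception {
    util.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
      .setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, evict);
    util.getAdmin().disableTable(tableName);
    util.waitUntilNoRegionsInTransition();
  }
}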