<!DOCTYPE HTML>
<html lang="en">
<head>
<!-- Generated by javadoc (17) -->
<title>Source code</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="source: package: org.apache.hadoop.hbase, class: TestIOFencing, class: BlockCompactionsInCompletionRegion">
<meta name="generator" content="javadoc/SourceToHTMLConverter">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body class="source-page">
<main role="main">
<div class="source-container">
<pre><span class="source-line-no">001</span><span id="line-1">/*</span>
<span class="source-line-no">002</span><span id="line-2"> * Licensed to the Apache Software Foundation (ASF) under one</span>
<span class="source-line-no">003</span><span id="line-3"> * or more contributor license agreements. See the NOTICE file</span>
<span class="source-line-no">004</span><span id="line-4"> * distributed with this work for additional information</span>
<span class="source-line-no">005</span><span id="line-5"> * regarding copyright ownership. The ASF licenses this file</span>
<span class="source-line-no">006</span><span id="line-6"> * to you under the Apache License, Version 2.0 (the</span>
<span class="source-line-no">007</span><span id="line-7"> * "License"); you may not use this file except in compliance</span>
<span class="source-line-no">008</span><span id="line-8"> * with the License. You may obtain a copy of the License at</span>
<span class="source-line-no">009</span><span id="line-9"> *</span>
<span class="source-line-no">010</span><span id="line-10"> * http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="source-line-no">011</span><span id="line-11"> *</span>
<span class="source-line-no">012</span><span id="line-12"> * Unless required by applicable law or agreed to in writing, software</span>
<span class="source-line-no">013</span><span id="line-13"> * distributed under the License is distributed on an "AS IS" BASIS,</span>
<span class="source-line-no">014</span><span id="line-14"> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="source-line-no">015</span><span id="line-15"> * See the License for the specific language governing permissions and</span>
<span class="source-line-no">016</span><span id="line-16"> * limitations under the License.</span>
<span class="source-line-no">017</span><span id="line-17"> */</span>
<span class="source-line-no">018</span><span id="line-18">package org.apache.hadoop.hbase;</span>
<span class="source-line-no">019</span><span id="line-19"></span>
<span class="source-line-no">020</span><span id="line-20">import static org.junit.Assert.assertEquals;</span>
<span class="source-line-no">021</span><span id="line-21">import static org.junit.Assert.assertTrue;</span>
<span class="source-line-no">022</span><span id="line-22"></span>
<span class="source-line-no">023</span><span id="line-23">import java.io.IOException;</span>
<span class="source-line-no">024</span><span id="line-24">import java.util.List;</span>
<span class="source-line-no">025</span><span id="line-25">import java.util.concurrent.CountDownLatch;</span>
<span class="source-line-no">026</span><span id="line-26">import java.util.concurrent.atomic.AtomicInteger;</span>
<span class="source-line-no">027</span><span id="line-27">import org.apache.hadoop.conf.Configuration;</span>
<span class="source-line-no">028</span><span id="line-28">import org.apache.hadoop.fs.FileSystem;</span>
<span class="source-line-no">029</span><span id="line-29">import org.apache.hadoop.fs.Path;</span>
<span class="source-line-no">030</span><span id="line-30">import org.apache.hadoop.hbase.client.Admin;</span>
<span class="source-line-no">031</span><span id="line-31">import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;</span>
<span class="source-line-no">032</span><span id="line-32">import org.apache.hadoop.hbase.client.RegionInfo;</span>
<span class="source-line-no">033</span><span id="line-33">import org.apache.hadoop.hbase.client.RegionInfoBuilder;</span>
<span class="source-line-no">034</span><span id="line-34">import org.apache.hadoop.hbase.client.Table;</span>
<span class="source-line-no">035</span><span id="line-35">import org.apache.hadoop.hbase.client.TableDescriptor;</span>
<span class="source-line-no">036</span><span id="line-36">import org.apache.hadoop.hbase.regionserver.CompactingMemStore;</span>
<span class="source-line-no">037</span><span id="line-37">import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;</span>
<span class="source-line-no">038</span><span id="line-38">import org.apache.hadoop.hbase.regionserver.HRegion;</span>
<span class="source-line-no">039</span><span id="line-39">import org.apache.hadoop.hbase.regionserver.HRegionServer;</span>
<span class="source-line-no">040</span><span id="line-40">import org.apache.hadoop.hbase.regionserver.HStore;</span>
<span class="source-line-no">041</span><span id="line-41">import org.apache.hadoop.hbase.regionserver.Region;</span>
<span class="source-line-no">042</span><span id="line-42">import org.apache.hadoop.hbase.regionserver.RegionServerServices;</span>
<span class="source-line-no">043</span><span id="line-43">import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;</span>
<span class="source-line-no">044</span><span id="line-44">import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;</span>
<span class="source-line-no">045</span><span id="line-45">import org.apache.hadoop.hbase.regionserver.wal.WALUtil;</span>
<span class="source-line-no">046</span><span id="line-46">import org.apache.hadoop.hbase.security.User;</span>
<span class="source-line-no">047</span><span id="line-47">import org.apache.hadoop.hbase.testclassification.LargeTests;</span>
<span class="source-line-no">048</span><span id="line-48">import org.apache.hadoop.hbase.testclassification.MiscTests;</span>
<span class="source-line-no">049</span><span id="line-49">import org.apache.hadoop.hbase.util.Bytes;</span>
<span class="source-line-no">050</span><span id="line-50">import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;</span>
<span class="source-line-no">051</span><span id="line-51">import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;</span>
<span class="source-line-no">052</span><span id="line-52">import org.apache.hadoop.hbase.wal.WAL;</span>
<span class="source-line-no">053</span><span id="line-53">import org.junit.ClassRule;</span>
<span class="source-line-no">054</span><span id="line-54">import org.junit.Test;</span>
<span class="source-line-no">055</span><span id="line-55">import org.junit.experimental.categories.Category;</span>
<span class="source-line-no">056</span><span id="line-56">import org.slf4j.Logger;</span>
<span class="source-line-no">057</span><span id="line-57">import org.slf4j.LoggerFactory;</span>
<span class="source-line-no">058</span><span id="line-58"></span>
<span class="source-line-no">059</span><span id="line-59">import org.apache.hbase.thirdparty.com.google.common.collect.Lists;</span>
<span class="source-line-no">060</span><span id="line-60"></span>
<span class="source-line-no">061</span><span id="line-61">import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;</span>
<span class="source-line-no">062</span><span id="line-62">import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;</span>
<span class="source-line-no">063</span><span id="line-63"></span>
<span class="source-line-no">064</span><span id="line-64">/**</span>
<span class="source-line-no">065</span><span id="line-65"> * Test for the case where a regionserver going down has enough cycles to do damage to regions that</span>
<span class="source-line-no">066</span><span id="line-66"> * have actually been assigned elsehwere.</span>
<span class="source-line-no">067</span><span id="line-67"> * &lt;p&gt;</span>
<span class="source-line-no">068</span><span id="line-68"> * If we happen to assign a region before it fully done with in its old location -- i.e. it is on</span>
<span class="source-line-no">069</span><span id="line-69"> * two servers at the same time -- all can work fine until the case where the region on the dying</span>
<span class="source-line-no">070</span><span id="line-70"> * server decides to compact or otherwise change the region file set. The region in its new location</span>
<span class="source-line-no">071</span><span id="line-71"> * will then get a surprise when it tries to do something w/ a file removed by the region in its old</span>
<span class="source-line-no">072</span><span id="line-72"> * location on dying server.</span>
<span class="source-line-no">073</span><span id="line-73"> * &lt;p&gt;</span>
<span class="source-line-no">074</span><span id="line-74"> * Making a test for this case is a little tough in that even if a file is deleted up on the</span>
<span class="source-line-no">075</span><span id="line-75"> * namenode, if the file was opened before the delete, it will continue to let reads happen until</span>
<span class="source-line-no">076</span><span id="line-76"> * something changes the state of cached blocks in the dfsclient that was already open (a block from</span>
<span class="source-line-no">077</span><span id="line-77"> * the deleted file is cleaned from the datanode by NN).</span>
<span class="source-line-no">078</span><span id="line-78"> * &lt;p&gt;</span>
<span class="source-line-no">079</span><span id="line-79"> * What we will do below is do an explicit check for existence on the files listed in the region</span>
<span class="source-line-no">080</span><span id="line-80"> * that has had some files removed because of a compaction. This sort of hurry's along and makes</span>
<span class="source-line-no">081</span><span id="line-81"> * certain what is a chance occurance.</span>
<span class="source-line-no">082</span><span id="line-82"> */</span>
<span class="source-line-no">083</span><span id="line-83">@Category({ MiscTests.class, LargeTests.class })</span>
<span class="source-line-no">084</span><span id="line-84">public class TestIOFencing {</span>
<span class="source-line-no">085</span><span id="line-85"></span>
<span class="source-line-no">086</span><span id="line-86"> @ClassRule</span>
<span class="source-line-no">087</span><span id="line-87"> public static final HBaseClassTestRule CLASS_RULE =</span>
<span class="source-line-no">088</span><span id="line-88"> HBaseClassTestRule.forClass(TestIOFencing.class);</span>
<span class="source-line-no">089</span><span id="line-89"></span>
<span class="source-line-no">090</span><span id="line-90"> private static final Logger LOG = LoggerFactory.getLogger(TestIOFencing.class);</span>
<span class="source-line-no">091</span><span id="line-91"> static {</span>
<span class="source-line-no">092</span><span id="line-92"> // Uncomment the following lines if more verbosity is needed for</span>
<span class="source-line-no">093</span><span id="line-93"> // debugging (see HBASE-12285 for details).</span>
<span class="source-line-no">094</span><span id="line-94"> // ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);</span>
<span class="source-line-no">095</span><span id="line-95"> // ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);</span>
<span class="source-line-no">096</span><span id="line-96"> // ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);</span>
<span class="source-line-no">097</span><span id="line-97"> // ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))</span>
<span class="source-line-no">098</span><span id="line-98"> // .getLogger().setLevel(Level.ALL);</span>
<span class="source-line-no">099</span><span id="line-99"> // ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);</span>
<span class="source-line-no">100</span><span id="line-100"> }</span>
<span class="source-line-no">101</span><span id="line-101"></span>
<span class="source-line-no">102</span><span id="line-102"> public abstract static class CompactionBlockerRegion extends HRegion {</span>
<span class="source-line-no">103</span><span id="line-103"> AtomicInteger compactCount = new AtomicInteger();</span>
<span class="source-line-no">104</span><span id="line-104"> volatile CountDownLatch compactionsBlocked = new CountDownLatch(0);</span>
<span class="source-line-no">105</span><span id="line-105"> volatile CountDownLatch compactionsWaiting = new CountDownLatch(0);</span>
<span class="source-line-no">106</span><span id="line-106"></span>
<span class="source-line-no">107</span><span id="line-107"> @SuppressWarnings("deprecation")</span>
<span class="source-line-no">108</span><span id="line-108"> public CompactionBlockerRegion(Path tableDir, WAL log, FileSystem fs, Configuration confParam,</span>
<span class="source-line-no">109</span><span id="line-109"> RegionInfo info, TableDescriptor htd, RegionServerServices rsServices) {</span>
<span class="source-line-no">110</span><span id="line-110"> super(tableDir, log, fs, confParam, info, htd, rsServices);</span>
<span class="source-line-no">111</span><span id="line-111"> }</span>
<span class="source-line-no">112</span><span id="line-112"></span>
<span class="source-line-no">113</span><span id="line-113"> public void stopCompactions() {</span>
<span class="source-line-no">114</span><span id="line-114"> compactionsBlocked = new CountDownLatch(1);</span>
<span class="source-line-no">115</span><span id="line-115"> compactionsWaiting = new CountDownLatch(1);</span>
<span class="source-line-no">116</span><span id="line-116"> }</span>
<span class="source-line-no">117</span><span id="line-117"></span>
<span class="source-line-no">118</span><span id="line-118"> public void allowCompactions() {</span>
<span class="source-line-no">119</span><span id="line-119"> LOG.debug("allowing compactions");</span>
<span class="source-line-no">120</span><span id="line-120"> compactionsBlocked.countDown();</span>
<span class="source-line-no">121</span><span id="line-121"> }</span>
<span class="source-line-no">122</span><span id="line-122"></span>
<span class="source-line-no">123</span><span id="line-123"> public void waitForCompactionToBlock() throws IOException {</span>
<span class="source-line-no">124</span><span id="line-124"> try {</span>
<span class="source-line-no">125</span><span id="line-125"> LOG.debug("waiting for compaction to block");</span>
<span class="source-line-no">126</span><span id="line-126"> compactionsWaiting.await();</span>
<span class="source-line-no">127</span><span id="line-127"> LOG.debug("compaction block reached");</span>
<span class="source-line-no">128</span><span id="line-128"> } catch (InterruptedException ex) {</span>
<span class="source-line-no">129</span><span id="line-129"> throw new IOException(ex);</span>
<span class="source-line-no">130</span><span id="line-130"> }</span>
<span class="source-line-no">131</span><span id="line-131"> }</span>
<span class="source-line-no">132</span><span id="line-132"></span>
<span class="source-line-no">133</span><span id="line-133"> @Override</span>
<span class="source-line-no">134</span><span id="line-134"> public boolean compact(CompactionContext compaction, HStore store,</span>
<span class="source-line-no">135</span><span id="line-135"> ThroughputController throughputController) throws IOException {</span>
<span class="source-line-no">136</span><span id="line-136"> try {</span>
<span class="source-line-no">137</span><span id="line-137"> return super.compact(compaction, store, throughputController);</span>
<span class="source-line-no">138</span><span id="line-138"> } finally {</span>
<span class="source-line-no">139</span><span id="line-139"> compactCount.getAndIncrement();</span>
<span class="source-line-no">140</span><span id="line-140"> }</span>
<span class="source-line-no">141</span><span id="line-141"> }</span>
<span class="source-line-no">142</span><span id="line-142"></span>
<span class="source-line-no">143</span><span id="line-143"> @Override</span>
<span class="source-line-no">144</span><span id="line-144"> public boolean compact(CompactionContext compaction, HStore store,</span>
<span class="source-line-no">145</span><span id="line-145"> ThroughputController throughputController, User user) throws IOException {</span>
<span class="source-line-no">146</span><span id="line-146"> try {</span>
<span class="source-line-no">147</span><span id="line-147"> return super.compact(compaction, store, throughputController, user);</span>
<span class="source-line-no">148</span><span id="line-148"> } finally {</span>
<span class="source-line-no">149</span><span id="line-149"> compactCount.getAndIncrement();</span>
<span class="source-line-no">150</span><span id="line-150"> }</span>
<span class="source-line-no">151</span><span id="line-151"> }</span>
<span class="source-line-no">152</span><span id="line-152"></span>
<span class="source-line-no">153</span><span id="line-153"> public int countStoreFiles() {</span>
<span class="source-line-no">154</span><span id="line-154"> int count = 0;</span>
<span class="source-line-no">155</span><span id="line-155"> for (HStore store : stores.values()) {</span>
<span class="source-line-no">156</span><span id="line-156"> count += store.getStorefilesCount();</span>
<span class="source-line-no">157</span><span id="line-157"> }</span>
<span class="source-line-no">158</span><span id="line-158"> return count;</span>
<span class="source-line-no">159</span><span id="line-159"> }</span>
<span class="source-line-no">160</span><span id="line-160"> }</span>
<span class="source-line-no">161</span><span id="line-161"></span>
<span class="source-line-no">162</span><span id="line-162"> /**</span>
<span class="source-line-no">163</span><span id="line-163"> * An override of HRegion that allows us park compactions in a holding pattern and then when</span>
<span class="source-line-no">164</span><span id="line-164"> * appropriate for the test, allow them proceed again.</span>
<span class="source-line-no">165</span><span id="line-165"> */</span>
<span class="source-line-no">166</span><span id="line-166"> public static class BlockCompactionsInPrepRegion extends CompactionBlockerRegion {</span>
<span class="source-line-no">167</span><span id="line-167"></span>
<span class="source-line-no">168</span><span id="line-168"> public BlockCompactionsInPrepRegion(Path tableDir, WAL log, FileSystem fs,</span>
<span class="source-line-no">169</span><span id="line-169"> Configuration confParam, RegionInfo info, TableDescriptor htd,</span>
<span class="source-line-no">170</span><span id="line-170"> RegionServerServices rsServices) {</span>
<span class="source-line-no">171</span><span id="line-171"> super(tableDir, log, fs, confParam, info, htd, rsServices);</span>
<span class="source-line-no">172</span><span id="line-172"> }</span>
<span class="source-line-no">173</span><span id="line-173"></span>
<span class="source-line-no">174</span><span id="line-174"> @Override</span>
<span class="source-line-no">175</span><span id="line-175"> protected void doRegionCompactionPrep() throws IOException {</span>
<span class="source-line-no">176</span><span id="line-176"> compactionsWaiting.countDown();</span>
<span class="source-line-no">177</span><span id="line-177"> try {</span>
<span class="source-line-no">178</span><span id="line-178"> compactionsBlocked.await();</span>
<span class="source-line-no">179</span><span id="line-179"> } catch (InterruptedException ex) {</span>
<span class="source-line-no">180</span><span id="line-180"> throw new IOException();</span>
<span class="source-line-no">181</span><span id="line-181"> }</span>
<span class="source-line-no">182</span><span id="line-182"> super.doRegionCompactionPrep();</span>
<span class="source-line-no">183</span><span id="line-183"> }</span>
<span class="source-line-no">184</span><span id="line-184"> }</span>
<span class="source-line-no">185</span><span id="line-185"></span>
<span class="source-line-no">186</span><span id="line-186"> /**</span>
<span class="source-line-no">187</span><span id="line-187"> * An override of HRegion that allows us park compactions in a holding pattern and then when</span>
<span class="source-line-no">188</span><span id="line-188"> * appropriate for the test, allow them proceed again. This allows the compaction entry to go the</span>
<span class="source-line-no">189</span><span id="line-189"> * WAL before blocking, but blocks afterwards</span>
<span class="source-line-no">190</span><span id="line-190"> */</span>
<span class="source-line-no">191</span><span id="line-191"> public static class BlockCompactionsInCompletionRegion extends CompactionBlockerRegion {</span>
<span class="source-line-no">192</span><span id="line-192"> public BlockCompactionsInCompletionRegion(Path tableDir, WAL log, FileSystem fs,</span>
<span class="source-line-no">193</span><span id="line-193"> Configuration confParam, RegionInfo info, TableDescriptor htd,</span>
<span class="source-line-no">194</span><span id="line-194"> RegionServerServices rsServices) {</span>
<span class="source-line-no">195</span><span id="line-195"> super(tableDir, log, fs, confParam, info, htd, rsServices);</span>
<span class="source-line-no">196</span><span id="line-196"> }</span>
<span class="source-line-no">197</span><span id="line-197"></span>
<span class="source-line-no">198</span><span id="line-198"> @Override</span>
<span class="source-line-no">199</span><span id="line-199"> protected HStore instantiateHStore(final ColumnFamilyDescriptor family, boolean warmup)</span>
<span class="source-line-no">200</span><span id="line-200"> throws IOException {</span>
<span class="source-line-no">201</span><span id="line-201"> return new BlockCompactionsInCompletionHStore(this, family, this.conf, warmup);</span>
<span class="source-line-no">202</span><span id="line-202"> }</span>
<span class="source-line-no">203</span><span id="line-203"> }</span>
<span class="source-line-no">204</span><span id="line-204"></span>
<span class="source-line-no">205</span><span id="line-205"> public static class BlockCompactionsInCompletionHStore extends HStore {</span>
<span class="source-line-no">206</span><span id="line-206"> CompactionBlockerRegion r;</span>
<span class="source-line-no">207</span><span id="line-207"></span>
<span class="source-line-no">208</span><span id="line-208"> protected BlockCompactionsInCompletionHStore(HRegion region, ColumnFamilyDescriptor family,</span>
<span class="source-line-no">209</span><span id="line-209"> Configuration confParam, boolean warmup) throws IOException {</span>
<span class="source-line-no">210</span><span id="line-210"> super(region, family, confParam, warmup);</span>
<span class="source-line-no">211</span><span id="line-211"> r = (CompactionBlockerRegion) region;</span>
<span class="source-line-no">212</span><span id="line-212"> }</span>
<span class="source-line-no">213</span><span id="line-213"></span>
<span class="source-line-no">214</span><span id="line-214"> @Override</span>
<span class="source-line-no">215</span><span id="line-215"> protected void refreshStoreSizeAndTotalBytes() throws IOException {</span>
<span class="source-line-no">216</span><span id="line-216"> if (r != null) {</span>
<span class="source-line-no">217</span><span id="line-217"> try {</span>
<span class="source-line-no">218</span><span id="line-218"> r.compactionsWaiting.countDown();</span>
<span class="source-line-no">219</span><span id="line-219"> r.compactionsBlocked.await();</span>
<span class="source-line-no">220</span><span id="line-220"> } catch (InterruptedException ex) {</span>
<span class="source-line-no">221</span><span id="line-221"> throw new IOException(ex);</span>
<span class="source-line-no">222</span><span id="line-222"> }</span>
<span class="source-line-no">223</span><span id="line-223"> }</span>
<span class="source-line-no">224</span><span id="line-224"> super.refreshStoreSizeAndTotalBytes();</span>
<span class="source-line-no">225</span><span id="line-225"> }</span>
<span class="source-line-no">226</span><span id="line-226"> }</span>
<span class="source-line-no">227</span><span id="line-227"></span>
<span class="source-line-no">228</span><span id="line-228"> private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();</span>
<span class="source-line-no">229</span><span id="line-229"> private final static TableName TABLE_NAME = TableName.valueOf("tabletest");</span>
<span class="source-line-no">230</span><span id="line-230"> private final static byte[] FAMILY = Bytes.toBytes("family");</span>
<span class="source-line-no">231</span><span id="line-231"> private static final int FIRST_BATCH_COUNT = 4000;</span>
<span class="source-line-no">232</span><span id="line-232"> private static final int SECOND_BATCH_COUNT = FIRST_BATCH_COUNT;</span>
<span class="source-line-no">233</span><span id="line-233"></span>
<span class="source-line-no">234</span><span id="line-234"> /**</span>
<span class="source-line-no">235</span><span id="line-235"> * Test that puts up a regionserver, starts a compaction on a loaded region but holds the</span>
<span class="source-line-no">236</span><span id="line-236"> * compaction until after we have killed the server and the region has come up on a new</span>
<span class="source-line-no">237</span><span id="line-237"> * regionserver altogether. This fakes the double assignment case where region in one location</span>
<span class="source-line-no">238</span><span id="line-238"> * changes the files out from underneath a region being served elsewhere.</span>
<span class="source-line-no">239</span><span id="line-239"> */</span>
<span class="source-line-no">240</span><span id="line-240"> @Test</span>
<span class="source-line-no">241</span><span id="line-241"> public void testFencingAroundCompaction() throws Exception {</span>
<span class="source-line-no">242</span><span id="line-242"> for (MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {</span>
<span class="source-line-no">243</span><span id="line-243"> doTest(BlockCompactionsInPrepRegion.class, policy);</span>
<span class="source-line-no">244</span><span id="line-244"> }</span>
<span class="source-line-no">245</span><span id="line-245"> }</span>
<span class="source-line-no">246</span><span id="line-246"></span>
<span class="source-line-no">247</span><span id="line-247"> /**</span>
<span class="source-line-no">248</span><span id="line-248"> * Test that puts up a regionserver, starts a compaction on a loaded region but holds the</span>
<span class="source-line-no">249</span><span id="line-249"> * compaction completion until after we have killed the server and the region has come up on a new</span>
<span class="source-line-no">250</span><span id="line-250"> * regionserver altogether. This fakes the double assignment case where region in one location</span>
<span class="source-line-no">251</span><span id="line-251"> * changes the files out from underneath a region being served elsewhere.</span>
<span class="source-line-no">252</span><span id="line-252"> */</span>
<span class="source-line-no">253</span><span id="line-253"> @Test</span>
<span class="source-line-no">254</span><span id="line-254"> public void testFencingAroundCompactionAfterWALSync() throws Exception {</span>
<span class="source-line-no">255</span><span id="line-255"> for (MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {</span>
<span class="source-line-no">256</span><span id="line-256"> doTest(BlockCompactionsInCompletionRegion.class, policy);</span>
<span class="source-line-no">257</span><span id="line-257"> }</span>
<span class="source-line-no">258</span><span id="line-258"> }</span>
<span class="source-line-no">259</span><span id="line-259"></span>
<span class="source-line-no">260</span><span id="line-260"> public void doTest(Class&lt;?&gt; regionClass, MemoryCompactionPolicy policy) throws Exception {</span>
<span class="source-line-no">261</span><span id="line-261"> Configuration c = TEST_UTIL.getConfiguration();</span>
<span class="source-line-no">262</span><span id="line-262"> // Insert our custom region</span>
<span class="source-line-no">263</span><span id="line-263"> c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);</span>
<span class="source-line-no">264</span><span id="line-264"> // Encourage plenty of flushes</span>
<span class="source-line-no">265</span><span id="line-265"> c.setLong("hbase.hregion.memstore.flush.size", 25000);</span>
<span class="source-line-no">266</span><span id="line-266"> c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());</span>
<span class="source-line-no">267</span><span id="line-267"> // Only run compaction when we tell it to</span>
<span class="source-line-no">268</span><span id="line-268"> c.setInt("hbase.hstore.compaction.min", 1);</span>
<span class="source-line-no">269</span><span id="line-269"> c.setInt("hbase.hstore.compactionThreshold", 1000);</span>
<span class="source-line-no">270</span><span id="line-270"> c.setLong("hbase.hstore.blockingStoreFiles", 1000);</span>
<span class="source-line-no">271</span><span id="line-271"> // Compact quickly after we tell it to!</span>
<span class="source-line-no">272</span><span id="line-272"> c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);</span>
<span class="source-line-no">273</span><span id="line-273"> c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy));</span>
<span class="source-line-no">274</span><span id="line-274"> LOG.info("Starting mini cluster");</span>
<span class="source-line-no">275</span><span id="line-275"> TEST_UTIL.startMiniCluster(1);</span>
<span class="source-line-no">276</span><span id="line-276"> CompactionBlockerRegion compactingRegion = null;</span>
<span class="source-line-no">277</span><span id="line-277"> Admin admin = null;</span>
<span class="source-line-no">278</span><span id="line-278"> try {</span>
<span class="source-line-no">279</span><span id="line-279"> LOG.info("Creating admin");</span>
<span class="source-line-no">280</span><span id="line-280"> admin = TEST_UTIL.getConnection().getAdmin();</span>
<span class="source-line-no">281</span><span id="line-281"> LOG.info("Creating table");</span>
<span class="source-line-no">282</span><span id="line-282"> TEST_UTIL.createTable(TABLE_NAME, FAMILY);</span>
<span class="source-line-no">283</span><span id="line-283"> Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);</span>
<span class="source-line-no">284</span><span id="line-284"> LOG.info("Loading test table");</span>
<span class="source-line-no">285</span><span id="line-285"> // Find the region</span>
<span class="source-line-no">286</span><span id="line-286"> List&lt;HRegion&gt; testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);</span>
<span class="source-line-no">287</span><span id="line-287"> assertEquals(1, testRegions.size());</span>
<span class="source-line-no">288</span><span id="line-288"> compactingRegion = (CompactionBlockerRegion) testRegions.get(0);</span>
<span class="source-line-no">289</span><span id="line-289"> LOG.info("Blocking compactions");</span>
<span class="source-line-no">290</span><span id="line-290"> compactingRegion.stopCompactions();</span>
<span class="source-line-no">291</span><span id="line-291"> long lastFlushTime = compactingRegion.getEarliestFlushTimeForAllStores();</span>
<span class="source-line-no">292</span><span id="line-292"> // Load some rows</span>
<span class="source-line-no">293</span><span id="line-293"> TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT);</span>
<span class="source-line-no">294</span><span id="line-294"></span>
<span class="source-line-no">295</span><span id="line-295"> // add a compaction from an older (non-existing) region to see whether we successfully skip</span>
<span class="source-line-no">296</span><span id="line-296"> // those entries</span>
<span class="source-line-no">297</span><span id="line-297"> RegionInfo oldHri = RegionInfoBuilder.newBuilder(table.getName()).build();</span>
<span class="source-line-no">298</span><span id="line-298"> CompactionDescriptor compactionDescriptor =</span>
<span class="source-line-no">299</span><span id="line-299"> ProtobufUtil.toCompactionDescriptor(oldHri, FAMILY, Lists.newArrayList(new Path("/a")),</span>
<span class="source-line-no">300</span><span id="line-300"> Lists.newArrayList(new Path("/b")), new Path("store_dir"));</span>
<span class="source-line-no">301</span><span id="line-301"> WALUtil.writeCompactionMarker(compactingRegion.getWAL(),</span>
<span class="source-line-no">302</span><span id="line-302"> ((HRegion) compactingRegion).getReplicationScope(), oldHri, compactionDescriptor,</span>
<span class="source-line-no">303</span><span id="line-303"> compactingRegion.getMVCC(), null);</span>
<span class="source-line-no">304</span><span id="line-304"></span>
<span class="source-line-no">305</span><span id="line-305"> // Wait till flush has happened, otherwise there won't be multiple store files</span>
<span class="source-line-no">306</span><span id="line-306"> long startWaitTime = EnvironmentEdgeManager.currentTime();</span>
<span class="source-line-no">307</span><span id="line-307"> while (</span>
<span class="source-line-no">308</span><span id="line-308"> compactingRegion.getEarliestFlushTimeForAllStores() &lt;= lastFlushTime</span>
<span class="source-line-no">309</span><span id="line-309"> || compactingRegion.countStoreFiles() &lt;= 1</span>
<span class="source-line-no">310</span><span id="line-310"> ) {</span>
<span class="source-line-no">311</span><span id="line-311"> LOG.info("Waiting for the region to flush "</span>
<span class="source-line-no">312</span><span id="line-312"> + compactingRegion.getRegionInfo().getRegionNameAsString());</span>
<span class="source-line-no">313</span><span id="line-313"> Thread.sleep(1000);</span>
<span class="source-line-no">314</span><span id="line-314"> admin.flush(table.getName());</span>
<span class="source-line-no">315</span><span id="line-315"> assertTrue("Timed out waiting for the region to flush",</span>
<span class="source-line-no">316</span><span id="line-316"> EnvironmentEdgeManager.currentTime() - startWaitTime &lt; 30000);</span>
<span class="source-line-no">317</span><span id="line-317"> }</span>
<span class="source-line-no">318</span><span id="line-318"> assertTrue(compactingRegion.countStoreFiles() &gt; 1);</span>
<span class="source-line-no">319</span><span id="line-319"> final byte REGION_NAME[] = compactingRegion.getRegionInfo().getRegionName();</span>
<span class="source-line-no">320</span><span id="line-320"> LOG.info("Asking for compaction");</span>
<span class="source-line-no">321</span><span id="line-321"> admin.majorCompact(TABLE_NAME);</span>
<span class="source-line-no">322</span><span id="line-322"> LOG.info("Waiting for compaction to be about to start");</span>
<span class="source-line-no">323</span><span id="line-323"> compactingRegion.waitForCompactionToBlock();</span>
<span class="source-line-no">324</span><span id="line-324"> LOG.info("Starting a new server");</span>
<span class="source-line-no">325</span><span id="line-325"> RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer();</span>
<span class="source-line-no">326</span><span id="line-326"> final HRegionServer newServer = newServerThread.getRegionServer();</span>
<span class="source-line-no">327</span><span id="line-327"> LOG.info("Killing region server ZK lease");</span>
<span class="source-line-no">328</span><span id="line-328"> TEST_UTIL.expireRegionServerSession(0);</span>
<span class="source-line-no">329</span><span id="line-329"> CompactionBlockerRegion newRegion = null;</span>
<span class="source-line-no">330</span><span id="line-330"> startWaitTime = EnvironmentEdgeManager.currentTime();</span>
<span class="source-line-no">331</span><span id="line-331"> LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));</span>
<span class="source-line-no">332</span><span id="line-332"></span>
<span class="source-line-no">333</span><span id="line-333"> // wait for region to be assigned and to go out of log replay if applicable</span>
<span class="source-line-no">334</span><span id="line-334"> Waiter.waitFor(c, 60000, new Waiter.Predicate&lt;Exception&gt;() {</span>
<span class="source-line-no">335</span><span id="line-335"> @Override</span>
<span class="source-line-no">336</span><span id="line-336"> public boolean evaluate() throws Exception {</span>
<span class="source-line-no">337</span><span id="line-337"> Region newRegion = newServer.getOnlineRegion(REGION_NAME);</span>
<span class="source-line-no">338</span><span id="line-338"> return newRegion != null;</span>
<span class="source-line-no">339</span><span id="line-339"> }</span>
<span class="source-line-no">340</span><span id="line-340"> });</span>
<span class="source-line-no">341</span><span id="line-341"></span>
<span class="source-line-no">342</span><span id="line-342"> newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME);</span>
<span class="source-line-no">343</span><span id="line-343"></span>
<span class="source-line-no">344</span><span id="line-344"> // After compaction of old region finishes on the server that was going down, make sure that</span>
<span class="source-line-no">345</span><span id="line-345"> // all the files we expect are still working when region is up in new location.</span>
<span class="source-line-no">346</span><span id="line-346"> FileSystem fs = newRegion.getFilesystem();</span>
<span class="source-line-no">347</span><span id="line-347"> for (String f : newRegion.getStoreFileList(new byte[][] { FAMILY })) {</span>
<span class="source-line-no">348</span><span id="line-348"> assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f)));</span>
<span class="source-line-no">349</span><span id="line-349"> }</span>
<span class="source-line-no">350</span><span id="line-350"> LOG.info("Allowing compaction to proceed");</span>
<span class="source-line-no">351</span><span id="line-351"> compactingRegion.allowCompactions();</span>
<span class="source-line-no">352</span><span id="line-352"> while (compactingRegion.compactCount.get() == 0) {</span>
<span class="source-line-no">353</span><span id="line-353"> Thread.sleep(1000);</span>
<span class="source-line-no">354</span><span id="line-354"> }</span>
<span class="source-line-no">355</span><span id="line-355"> // The server we killed stays up until the compaction that was started before it was killed</span>
<span class="source-line-no">356</span><span id="line-356"> // completes. In logs you should see the old regionserver now going down.</span>
<span class="source-line-no">357</span><span id="line-357"> LOG.info("Compaction finished");</span>
<span class="source-line-no">358</span><span id="line-358"></span>
<span class="source-line-no">359</span><span id="line-359"> // If we survive the split keep going...</span>
<span class="source-line-no">360</span><span id="line-360"> // Now we make sure that the region isn't totally confused. Load up more rows.</span>
<span class="source-line-no">361</span><span id="line-361"> TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT,</span>
<span class="source-line-no">362</span><span id="line-362"> FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);</span>
<span class="source-line-no">363</span><span id="line-363"> admin.majorCompact(TABLE_NAME);</span>
<span class="source-line-no">364</span><span id="line-364"> startWaitTime = EnvironmentEdgeManager.currentTime();</span>
<span class="source-line-no">365</span><span id="line-365"> while (newRegion.compactCount.get() == 0) {</span>
<span class="source-line-no">366</span><span id="line-366"> Thread.sleep(1000);</span>
<span class="source-line-no">367</span><span id="line-367"> assertTrue("New region never compacted",</span>
<span class="source-line-no">368</span><span id="line-368"> EnvironmentEdgeManager.currentTime() - startWaitTime &lt; 180000);</span>
<span class="source-line-no">369</span><span id="line-369"> }</span>
<span class="source-line-no">370</span><span id="line-370"> int count;</span>
<span class="source-line-no">371</span><span id="line-371"> for (int i = 0;; i++) {</span>
<span class="source-line-no">372</span><span id="line-372"> try {</span>
<span class="source-line-no">373</span><span id="line-373"> count = HBaseTestingUtil.countRows(table);</span>
<span class="source-line-no">374</span><span id="line-374"> break;</span>
<span class="source-line-no">375</span><span id="line-375"> } catch (DoNotRetryIOException e) {</span>
<span class="source-line-no">376</span><span id="line-376"> // wait up to 30s</span>
<span class="source-line-no">377</span><span id="line-377"> if (i &gt;= 30 || !e.getMessage().contains("File does not exist")) {</span>
<span class="source-line-no">378</span><span id="line-378"> throw e;</span>
<span class="source-line-no">379</span><span id="line-379"> }</span>
<span class="source-line-no">380</span><span id="line-380"> Thread.sleep(1000);</span>
<span class="source-line-no">381</span><span id="line-381"> }</span>
<span class="source-line-no">382</span><span id="line-382"> }</span>
<span class="source-line-no">383</span><span id="line-383"> if (policy == MemoryCompactionPolicy.EAGER || policy == MemoryCompactionPolicy.ADAPTIVE) {</span>
<span class="source-line-no">384</span><span id="line-384"> assertTrue(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT &gt;= count);</span>
<span class="source-line-no">385</span><span id="line-385"> } else {</span>
<span class="source-line-no">386</span><span id="line-386"> assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, count);</span>
<span class="source-line-no">387</span><span id="line-387"> }</span>
<span class="source-line-no">388</span><span id="line-388"> } finally {</span>
<span class="source-line-no">389</span><span id="line-389"> if (compactingRegion != null) {</span>
<span class="source-line-no">390</span><span id="line-390"> compactingRegion.allowCompactions();</span>
<span class="source-line-no">391</span><span id="line-391"> }</span>
<span class="source-line-no">392</span><span id="line-392"> admin.close();</span>
<span class="source-line-no">393</span><span id="line-393"> TEST_UTIL.shutdownMiniCluster();</span>
<span class="source-line-no">394</span><span id="line-394"> }</span>
<span class="source-line-no">395</span><span id="line-395"> }</span>
<span class="source-line-no">396</span><span id="line-396">}</span>
</pre>
</div>
</main>
</body>
</html>