Merge branch 'master' into HDDS-5447-httpfs
diff --git a/dev-support/ci/pr_title_check.bats b/dev-support/ci/pr_title_check.bats
index 5b7ebda..a3c9043 100644
--- a/dev-support/ci/pr_title_check.bats
+++ b/dev-support/ci/pr_title_check.bats
@@ -66,6 +66,10 @@
# case in summary does not matter
run dev-support/ci/pr_title_check.sh 'HDDS-1234. hello world in lower case'
assert_output 'OK'
+
+ # starts with 'Revert "'
+ run dev-support/ci/pr_title_check.sh 'Revert "HDDS-1234. Hello World"'
+ assert_output -e 'OK$'
}
@test "check illegal PR title examples" {
@@ -112,4 +116,28 @@
# double spaces in summary
run dev-support/ci/pr_title_check.sh 'HDDS-1234. Hello World'
assert_output 'Fail: two consecutive spaces'
+
+ # Invalid revert title 1
+ run dev-support/ci/pr_title_check.sh 'revert "HDDS-1234. Hello World'
+ assert_output 'Fail: must start with HDDS'
+
+ # Invalid revert title 2
+ run dev-support/ci/pr_title_check.sh 'Revert"HDDS-1234. Hello World'
+ assert_output 'Fail: must start with HDDS'
+
+ # Invalid revert title 3
+ run dev-support/ci/pr_title_check.sh 'Revert HDDS-1234. Hello World'
+ assert_output 'Fail: must start with HDDS'
+
+ # Invalid revert title 4
+ run dev-support/ci/pr_title_check.sh 'Revert: HDDS-1234. Hello World'
+ assert_output 'Fail: must start with HDDS'
+
+ # Invalid revert title 5
+ run dev-support/ci/pr_title_check.sh 'Revert: "HDDS-1234. Hello World"'
+ assert_output 'Fail: must start with HDDS'
+
+ # Invalid revert title 6
+ run dev-support/ci/pr_title_check.sh 'Revert "Hello World"'
+ assert_output -e 'Fail: must start with HDDS$'
}
diff --git a/dev-support/ci/pr_title_check.sh b/dev-support/ci/pr_title_check.sh
index b48e90c..f50004a 100755
--- a/dev-support/ci/pr_title_check.sh
+++ b/dev-support/ci/pr_title_check.sh
@@ -35,6 +35,13 @@
fi
}
+# strip starting 'Revert "', if any
+TITLE=${TITLE#Revert \"}
+if [ "$1" != "${TITLE}" ]; then
+ echo "Leading 'Revert \"' in the PR title has been stripped solely for this title checking purpose. Performing actual title check on:"
+ echo "${TITLE}"
+fi
+
assertMatch '^HDDS' 'Fail: must start with HDDS'
assertMatch '^HDDS-' 'Fail: missing dash in Jira'
assertNotMatch '^HDDS-0' 'Fail: leading zero in Jira'
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 95f67e9..7a61f8e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -302,4 +302,4 @@ RetryPolicy> getRetryPolicyByException(int maxRetryCount,
public static List<Class<? extends Exception>> getExceptionList() {
return EXCEPTION_LIST;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
index 73ad78c..da3822f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
@@ -20,4 +20,4 @@
/**
* Client facing classes for the container operations.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
index 9e5ba6a..a90dca9 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
@@ -34,4 +34,4 @@
* interface is defined by the
* {@link org.apache.hadoop.hdds.scm.XceiverClientSpi} interface.
*/
-package org.apache.hadoop.hdds.scm;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm;
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
index 94328f0..a1c3052 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteArrayReader.java
@@ -64,4 +64,4 @@ public int readFromBlock(InputStream is, int numBytesToRead) throws
public int getTargetLength() {
return this.targetLen;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferReader.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferReader.java
index fedfb95..217d725 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferReader.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferReader.java
@@ -68,4 +68,4 @@ public int readFromBlock(InputStream is, int numBytesToRead) throws
public int getTargetLength() {
return this.targetLen;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteReaderStrategy.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteReaderStrategy.java
index bb78a94..4994fff 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteReaderStrategy.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteReaderStrategy.java
@@ -38,4 +38,4 @@ public interface ByteReaderStrategy {
* @return the target length to read.
*/
int getTargetLength();
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java
index de868c7..7f693c7 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java
@@ -88,4 +88,4 @@ public synchronized void seek(long l) throws IOException {
public synchronized boolean seekToNewSource(long l) throws IOException {
return false;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
index d34e4dc..b889aa3 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
@@ -56,4 +56,4 @@ public static StreamBuffer allocate(int size) {
return new StreamBuffer(ByteBuffer.allocate(size));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
index 6e7ce94..d07e1e4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
@@ -20,4 +20,4 @@
/**
* Low level IO streams to upload/download chunks from container service.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
index 2b0cf12..36b1f13 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
@@ -226,4 +226,4 @@ public long getReplicatedMinCommitIndex() {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
index cd53d71..eca5d23 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
@@ -43,4 +43,4 @@ public void releaseAndReallocate() {
Assert.assertEquals(cb1, allocated);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
index abdd04e..86f61f9 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java
@@ -18,4 +18,4 @@
/**
* This package contains Ozone InputStream related tests.
*/
-package org.apache.hadoop.hdds.scm.storage;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.storage;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/package-info.java
index b993889..6452751 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/package-info.java
@@ -20,4 +20,4 @@
/**
* Generic Ozone/HDDS specific annotations..
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
index e81f134..bf1ff67 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
@@ -20,4 +20,4 @@
/**
* Base property types for HDDS containers and replications.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsPrometheusConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsPrometheusConfig.java
index 991afda..ae162d0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsPrometheusConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsPrometheusConfig.java
@@ -41,4 +41,4 @@ public String getPrometheusEndpointToken() {
public void setPrometheusEndpointToken(String prometheusEndpointToken) {
this.prometheusEndpointToken = prometheusEndpointToken;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
index 948057e..64d21d1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file
+package org.apache.hadoop.hdds.conf;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/package-info.java
index 381c811..945b74c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/package-info.java
@@ -21,4 +21,4 @@
/**
* Freon related helper classes used for load testing.
*/
-package org.apache.hadoop.hdds.freon;
\ No newline at end of file
+package org.apache.hadoop.hdds.freon;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
index f8894e6..523b350 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
@@ -20,4 +20,4 @@
/**
* Generic HDDS specific configurator and helper classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
index 7dae0fc..b03ccfa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains HDDS protocol related classes.
*/
-package org.apache.hadoop.hdds.protocol;
\ No newline at end of file
+package org.apache.hadoop.hdds.protocol;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
index d100b4a..2cc6b079 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
@@ -110,4 +110,4 @@ public static ServerNotLeaderException convertToNotLeaderException(
}
return serverNotLeaderException;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 713d720..916bd7f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -20,6 +20,7 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo;
@@ -369,7 +370,20 @@ StartContainerBalancerResponseProto startContainerBalancer(
void transferLeadership(String newLeaderId) throws IOException;
/**
- * Reset the expired deleted block retry count.
+ * Return the failed transactions of the Deleted blocks. A transaction is
+ * considered to be failed if it has been sent more than MAX_RETRY limit
+ * and its count is reset to -1.
+ *
+ * @param count Maximum num of returned transactions, if < 0. return all.
+ * @param startTxId The least transaction id to start with.
+ * @return a list of failed deleted block transactions.
+ * @throws IOException
+ */
+ List<DeletedBlocksTransactionInfo> getFailedDeletedBlockTxn(int count,
+ long startTxId) throws IOException;
+
+ /**
+ * Reset the failed deleted block retry count.
* @param txIDs transactionId list to be reset
* @throws IOException
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
index 4e406e6..693e646 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java
@@ -33,4 +33,4 @@ public class ContainerNotOpenException extends StorageContainerException {
public ContainerNotOpenException(String message) {
super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
index ffe0d3d..1e09201 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
@@ -19,4 +19,4 @@
/**
Contains protocol buffer helper classes and utilites used in
impl.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
index 19153b0..56cd3ce 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.hdds.scm.ha;
/**
Utility slasses for SCM HA.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index e0451d4..f2648f3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -540,4 +540,4 @@ private Map<String, Integer> getExcludedScopeNodeCount(
}
return nodeCounts;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 784b4cf..784ff24 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -911,4 +911,4 @@ private void checkAncestorGen(int ancestorGen) {
(maxLevel - 1) + "]");
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java
index 93a32b7..e7a45f6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java
@@ -258,4 +258,4 @@ private String getPath() {
this.location + this.name :
this.location + PATH_SEPARATOR_STR + this.name;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
index fc8e23b..f235d4b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
@@ -180,4 +180,4 @@ public void setSublayer(List<NodeSchema> sublayer) {
public List<NodeSchema> getSublayer() {
return sublayer;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java
index 375af7f..10e1a53 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.hdds.scm.net;
/**
The network topology supported by Ozone.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 116e767..b91e7f3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -153,6 +153,11 @@ void setLeaderId(UUID leaderId) {
this.leaderId = leaderId;
}
+ /** @return the number of datanodes in this pipeline. */
+ public int size() {
+ return nodeStatus.size();
+ }
+
/**
* Returns the list of nodes which form this pipeline.
*
@@ -216,18 +221,46 @@ public DatanodeDetails getLeaderNode() throws IOException {
}
public DatanodeDetails getFirstNode() throws IOException {
+ return getFirstNode(null);
+ }
+
+ public DatanodeDetails getFirstNode(Set<DatanodeDetails> excluded)
+ throws IOException {
+ if (excluded == null) {
+ excluded = Collections.emptySet();
+ }
if (nodeStatus.isEmpty()) {
throw new IOException(String.format("Pipeline=%s is empty", id));
}
- return nodeStatus.keySet().iterator().next();
+ for (DatanodeDetails d : nodeStatus.keySet()) {
+ if (!excluded.contains(d)) {
+ return d;
+ }
+ }
+ throw new IOException(String.format(
+ "All nodes are excluded: Pipeline=%s, excluded=%s", id, excluded));
}
public DatanodeDetails getClosestNode() throws IOException {
+ return getClosestNode(null);
+ }
+
+ public DatanodeDetails getClosestNode(Set<DatanodeDetails> excluded)
+ throws IOException {
+ if (excluded == null) {
+ excluded = Collections.emptySet();
+ }
if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) {
LOG.debug("Nodes in order is empty, delegate to getFirstNode");
- return getFirstNode();
+ return getFirstNode(excluded);
}
- return nodesInOrder.get().get(0);
+ for (DatanodeDetails d : nodesInOrder.get()) {
+ if (!excluded.contains(d)) {
+ return d;
+ }
+ }
+ throw new IOException(String.format(
+ "All nodes are excluded: Pipeline=%s, excluded=%s", id, excluded));
}
public boolean isClosed() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
index 51adc88..a23a567 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
@@ -21,4 +21,4 @@
That means that we can have a replication pipeline build on
Ratis, Simple or some other protocol. All Pipeline managers
the entities in charge of pipelines reside in the package.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index bd03bc8..7690b2e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -20,6 +20,7 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
@@ -317,7 +318,20 @@ Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
void transferLeadership(String newLeaderId) throws IOException;
/**
- * Reset the expired deleted block retry count.
+ * Return the failed transactions of the Deleted blocks. A transaction is
+ * considered to be failed if it has been sent more than MAX_RETRY limit
+ * and its count is reset to -1.
+ *
+ * @param count Maximum num of returned transactions, if < 0. return all.
+ * @param startTxId The least transaction id to start with.
+ * @return a list of failed deleted block transactions.
+ * @throws IOException
+ */
+ List<DeletedBlocksTransactionInfo> getFailedDeletedBlockTxn(int count,
+ long startTxId) throws IOException;
+
+ /**
+ * Reset the failed deleted block retry count.
*
* @param txIDs transactionId list to be reset
* @return num of successful reset
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index b921d4c..bfe3ebc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -21,8 +21,10 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
@@ -47,6 +49,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
@@ -64,12 +67,16 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Implementation of all container protocol calls performed by Container
* clients.
*/
public final class ContainerProtocolCalls {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ContainerProtocolCalls.class);
/**
* There is no need to instantiate this class.
@@ -278,7 +285,32 @@ public static ContainerProtos.ReadChunkResponseProto readChunk(
.setBlockID(blockID.getDatanodeBlockIDProtobuf())
.setChunkData(chunk)
.setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1);
- String id = xceiverClient.getPipeline().getClosestNode().getUuidString();
+ final Pipeline pipeline = xceiverClient.getPipeline();
+ final Set<DatanodeDetails> excluded = new HashSet<>();
+ for (; ;) {
+ final DatanodeDetails d = pipeline.getClosestNode(excluded);
+
+ try {
+ return readChunk(xceiverClient, chunk, blockID,
+ validators, token, readChunkRequest, d);
+ } catch (IOException e) {
+ excluded.add(d);
+ if (excluded.size() < pipeline.size()) {
+ LOG.warn(toErrorMessage(chunk, blockID, d), e);
+ } else {
+ throw e;
+ }
+ }
+ }
+ }
+
+ private static ContainerProtos.ReadChunkResponseProto readChunk(
+ XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID,
+ List<CheckedBiFunction> validators,
+ Token<? extends TokenIdentifier> token,
+ ReadChunkRequestProto.Builder readChunkRequest,
+ DatanodeDetails d) throws IOException {
+ final String id = d.getUuidString();
ContainerCommandRequestProto.Builder builder =
ContainerCommandRequestProto.newBuilder().setCmdType(Type.ReadChunk)
.setContainerID(blockID.getContainerID())
@@ -289,7 +321,30 @@ public static ContainerProtos.ReadChunkResponseProto readChunk(
ContainerCommandRequestProto request = builder.build();
ContainerCommandResponseProto reply =
xceiverClient.sendCommand(request, validators);
- return reply.getReadChunk();
+ final ReadChunkResponseProto response = reply.getReadChunk();
+ final long readLen = getLen(response);
+ if (readLen != chunk.getLen()) {
+ throw new IOException(toErrorMessage(chunk, blockID, d)
+ + ": readLen=" + readLen);
+ }
+ return response;
+ }
+
+ static String toErrorMessage(ChunkInfo chunk, BlockID blockId,
+ DatanodeDetails d) {
+ return String.format("Failed to read chunk %s (len=%s) %s from %s",
+ chunk.getChunkName(), chunk.getLen(), blockId, d);
+ }
+
+ static long getLen(ReadChunkResponseProto response) {
+ if (response.hasData()) {
+ return response.getData().size();
+ } else if (response.hasDataBuffers()) {
+ return response.getDataBuffers() .getBuffersList().stream()
+ .mapToLong(ByteString::size).sum();
+ } else {
+ return -1;
+ }
}
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/utils/package-info.java
index edd6b91..1e3121b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/utils/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/utils/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains utility classes for the SCM and client protocols.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java
index b980592..dc2dc43 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java
@@ -20,4 +20,4 @@
/**
* Exceptions thrown by SCM security classes.
*/
-package org.apache.hadoop.hdds.security.exception;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.exception;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLStatus.java
index f614cc0..099d050 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLStatus.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLStatus.java
@@ -84,4 +84,4 @@ public String toString() {
", pendingCRLIds=" + StringUtils.join(pendingCRLIds, ",") +
'}';
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
index a6369c6..33f6071 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
@@ -96,4 +96,4 @@ The CA implementation ( as of now it is called DefaultCA) receives a CSR from
Once the PKI Profile validates the request, it is either auto approved or
queued for manual review.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java
index 80d32bf..8136e73 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java
@@ -20,4 +20,4 @@
/**
* Server Util classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java
index 3ead03b..7c59083 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java
@@ -20,4 +20,4 @@
/**
* Helper classes to use distributed tracing in Ozone components.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
index 6fff80f6..5637041 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
@@ -66,4 +66,4 @@ public static synchronized long next() {
throw new RuntimeException("Got invalid time," +
" cannot generate unique Id. Current time: " + time);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index e447dd7..5e924be 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -603,6 +603,11 @@ public final class OzoneConfigKeys {
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT =
TimeUnit.HOURS.toMillis(1);
+ public static final String OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF =
+ "ozone.om.snapshot.force.full.diff";
+
+ public static final boolean OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT = false;
+
/**
* There is no need to instantiate this class.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
index 16e88df..4e1fe23 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
@@ -51,7 +51,8 @@ public enum SCMAction implements AuditAction {
ADD_SCM,
GET_REPLICATION_MANAGER_REPORT,
RESET_DELETED_BLOCK_RETRY_COUNT,
- TRANSFER_LEADERSHIP;
+ TRANSFER_LEADERSHIP,
+ GET_FAILED_DELETED_BLOCKS_TRANSACTION;
@Override
public String getAction() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java
index f01aef4..dbbc2e7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java
@@ -15,4 +15,4 @@
* the License.
*/
-package org.apache.hadoop.ozone.common.ha.ratis;
\ No newline at end of file
+package org.apache.hadoop.ozone.common.ha.ratis;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
index 6517e58..afcd396 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.common;
\ No newline at end of file
+package org.apache.hadoop.ozone.common;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
index bf8cbd5..97122e6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
@@ -65,4 +65,4 @@ public STATE getNextState(STATE from, EVENT e)
public void addTransition(STATE from, STATE to, EVENT e) {
transitions.getUnchecked(e).put(from, to);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
index 045409e..f8a159c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.common.statemachine;
/**
state machine template class for ozone.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/package-info.java
index 11741fa..82262c7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains common utility classes for HDDS.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
index cb6ec51..5eade76 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
@@ -19,4 +19,4 @@
/**
* Configuration classes for Ozone.
*/
-package org.apache.hadoop.ozone.conf;
\ No newline at end of file
+package org.apache.hadoop.ozone.conf;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
index fa5df11..ae754df 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
@@ -20,4 +20,4 @@
/**
* Helper classes for the container protocol communication.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
index 418f412..578cd03 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
@@ -42,4 +42,4 @@ public LeaseException(String message) {
super(message);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
index 5c677ce..4c42ea8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.lock;
/*
This package contains the lock related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
index d88cdbb..014f830 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
@@ -69,4 +69,4 @@ public static void calculateChunkedSumsByteArray(int bytesPerSum,
sumsOffset, data, dataOffset, dataLength);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
index a2c2038..bf88702 100644
--- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
+++ b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
@@ -26,4 +26,4 @@
hadoopProtoc2Version=${proto2.hadooprpc.protobuf.version}
hadoopProtoc3Version=${proto3.hadooprpc.protobuf.version}
grpcProtocVersion=${grpc.protobuf-compile.version}
-compilePlatform=${os.detected.classifier}
\ No newline at end of file
+compilePlatform=${os.detected.classifier}
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d5f00e3..f525478 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -840,9 +840,7 @@
</property>
<property>
<name>ozone.scm.keyvalue.container.deletion-choosing.policy</name>
- <value>
- org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
- </value>
+ <value>org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
The policy used for choosing desired keyvalue containers for block deletion.
@@ -865,9 +863,7 @@
</property>
<property>
<name>ozone.scm.container.placement.impl</name>
- <value>
- org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom
- </value>
+ <value>org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
The full name of class which implements
@@ -879,9 +875,7 @@
</property>
<property>
<name>ozone.scm.container.placement.ec.impl</name>
- <value>
- org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackScatter
- </value>
+ <value>org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackScatter</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
The full name of class which implements
@@ -946,9 +940,7 @@
</property>
<property>
<name>ozone.scm.pipeline.leader-choose.policy</name>
- <value>
- org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms.MinLeaderCountChoosePolicy
- </value>
+ <value>org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms.MinLeaderCountChoosePolicy</value>
<tag>OZONE, SCM, PIPELINE</tag>
<description>
The policy used for choosing desired leader for pipeline creation.
@@ -1349,14 +1341,12 @@
<property>
<name>hadoop.tags.custom</name>
- <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
- CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON</value>
+ <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON</value>
</property>
<property>
<name>ozone.tags.system</name>
- <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,
- CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON</value>
+ <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON</value>
</property>
@@ -2935,11 +2925,10 @@
<description>
The base dir for HTTP Jetty server to extract contents. If this property
is not configured, by default, Jetty will create a directory inside the
- directory named by the java.io.tmpdir System property(/tmp by default).
- While in production environment, it's strongly suggested to instruct Jetty
- to use a different parent directory by setting this property to the name
- of the desired parent directory. The value of the property will be used to
- set Jetty context attribute 'org.eclipse.jetty.webapp.basetempdir'.
+ directory named by the ${ozone.metadata.dirs}/webserver. While in production environment,
+ it's strongly suggested instructing Jetty to use a different parent directory by
+ setting this property to the name of the desired parent directory. The value of the
+ property will be used to set Jetty context attribute 'org.eclipse.jetty.webapp.basetempdir'.
The directory named by this property must exist and be writeable.
</description>
</property>
@@ -3608,4 +3597,14 @@
production environments.
</description>
</property>
+
+ <property>
+ <name>ozone.om.snapshot.force.full.diff</name>
+ <value>false</value>
+ <tag>OZONE, OM</tag>
+ <description>
+ If true, snapshot diff will always perform full diff (can be slow)
+ without using the optimised DAG based pruning approach
+ </description>
+ </property>
</configuration>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestECReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestECReplicationConfig.java
index 2387b99..3e9189f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestECReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestECReplicationConfig.java
@@ -77,4 +77,4 @@ void testSerializeToProtoAndBack() {
assertEquals(orig, recovered);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestGeneratedConfigurationOverwrite.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestGeneratedConfigurationOverwrite.java
index 03fd2be..be40692 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestGeneratedConfigurationOverwrite.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestGeneratedConfigurationOverwrite.java
@@ -63,4 +63,4 @@ public void getConfigurationObject() {
Assertions.assertNotEquals(
conf.getObject(SimpleConfiguration.class).getWaitTime(), 0);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
index e72c902..343e46f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains the OzoneConfiguration related tests.
*/
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file
+package org.apache.hadoop.hdds.conf;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
index 3fc7c4e..eb68dbd 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
@@ -60,4 +60,4 @@ public void testGetUsed() throws IOException {
assertTrue(subject.getUsedSpace() >= FILE_SIZE - 20);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java
index 5203b98..de9042a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java
@@ -87,4 +87,4 @@ public void testObjectCreatedFromProtoWithReplicaIndedx() {
Assertions.assertEquals(proto.getState(), info.getState());
Assertions.assertEquals(4, info.getReplicaIndex());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
index 7966941..d22d008 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.hdds.scm;
/**
Test cases for SCM client classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
index f77e84a1..44ebe38 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
@@ -57,4 +57,4 @@ void testExtract() throws Exception {
private static String pad(String s) {
return "0000000000000000".substring(s.length()) + s;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
index 18e1200..a2de316 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.hdds.tracing;
/**
Test cases for ozone tracing.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/test/MockComponent.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/test/MockComponent.java
index 93b876f..0d466ae 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/test/MockComponent.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/test/MockComponent.java
@@ -64,4 +64,4 @@ public void execute(MockComponent arg) {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java
index cc27a3b..0d9f524 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java
@@ -74,4 +74,4 @@ private void addTasks(List<Callable<Integer>> tasks) {
tasks.add(task);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
index f93e3fd..3f1a394 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
@@ -19,4 +19,4 @@
/**
* DB test Utils.
*/
-package org.apache.hadoop.hdds.utils;
\ No newline at end of file
+package org.apache.hadoop.hdds.utils;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
index dfdbba1..9addf74 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
@@ -203,4 +203,4 @@ public void testConcurrentWriteLockWithDifferentResource() throws Exception {
10 * count * sleep);
Assertions.assertEquals(count, done.get());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
index a96bc16..cf4eb65 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.lock;
/*
This package contains the lock related test classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
index ab82d02..f847c13 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
@@ -195,4 +195,4 @@ public String mockMethodV2() {
return "v2";
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java
index 0e27912..236285b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java
@@ -202,4 +202,4 @@ public void execute(MockComponent arg) throws Exception {
throw new IllegalStateException("Failed action!!");
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/PostConstruct.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/PostConstruct.java
index d9c0657..a70fa9a 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/PostConstruct.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/PostConstruct.java
@@ -29,4 +29,4 @@
@Retention(RUNTIME)
@Target(METHOD)
public @interface PostConstruct {
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationSourceTest.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationSourceTest.java
index 291ad9a..d4437cb 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationSourceTest.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationSourceTest.java
@@ -47,4 +47,4 @@ void getPropsMatchPrefix() {
entry.keySet().toArray()[0]);
Assert.assertEquals("value", entry.values().toArray()[0]);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
index 0edb01a..cf02145 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java
@@ -45,4 +45,4 @@ public void testInit() {
Assert.assertTrue("Generated config should contain tags",
builder.toString().contains("<tag>OZONE, SECURITY</tag>"));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java
index 8fe3f2d..ca25b59 100644
--- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java
+++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java
@@ -59,4 +59,4 @@ public void testGeneratedXml() throws FileNotFoundException {
Assert.assertTrue("Generated config should contain tags",
generatedXml.contains("<tag>MANAGEMENT</tag>"));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java
index 18ad66c..f7b484c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/DNMXBeanImpl.java
@@ -29,4 +29,4 @@ public DNMXBeanImpl(
VersionInfo versionInfo) {
super(versionInfo);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
index d6d36a3..76d1c00 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockDeletingServiceMetrics.java
@@ -99,4 +99,4 @@ public String toString() {
.append("failureCount = " + failureCount.value()).append("\t");
return buffer.toString();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 3d6cb3b..09eb65d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -125,4 +125,4 @@ public void incContainerBytesStats(ContainerProtos.Type type, long bytes) {
public long getContainerBytesMetrics(ContainerProtos.Type type) {
return opsBytesArray[type.ordinal()].value();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
index 21f31e1..76a6f20 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
@@ -19,4 +19,4 @@
/**
Contains protocol buffer helper classes and utilites used in
impl.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
index 16da5d9..7948edc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
@@ -19,4 +19,4 @@
/**
This package is contains Ozone container implementation.
-**/
\ No newline at end of file
+**/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java
index d83bf95..911a75a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java
@@ -17,4 +17,4 @@
package org.apache.hadoop.ozone.container.common.interfaces;
/**
This package contains common ozone container interfaces.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java
index 1638a36..49b6f36 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java
@@ -25,4 +25,4 @@
Ozone uses these abstractions to build Volumes, Buckets and Keys.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java
index 404b37a..ffcca56 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java
@@ -77,4 +77,4 @@
* | | | |
* | | | |
* - - - -
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 93c1d01..74d0d19 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -638,4 +638,4 @@ public int getAutoCompactionSmallSstFileNum() {
public void setAutoCompactionSmallSstFileNum(int num) {
this.autoCompactionSmallSstFileNum = num;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeQueueMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeQueueMetrics.java
index 29eaa83..a30c871 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeQueueMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeQueueMetrics.java
@@ -176,4 +176,4 @@ private MetricsInfo getMetricsInfo(String prefix, String metricName) {
String description = "Queue size of " + metricName + " from " + prefix;
return info(metric, description);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
index 64a65f5..e687b95 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
@@ -356,4 +356,4 @@ public String getType() {
}
return SCM_TYPE;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
index 3c28115..42e58eb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/SetNodeOperationalStateCommandHandler.java
@@ -148,4 +148,4 @@ public long getAverageRunTime() {
public int getQueuedCount() {
return 0;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
index 1e9c8dc..150d2a2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java
index feb2f81..32086e7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java
@@ -25,4 +25,4 @@
Start - > getVersion -> Register -> Running -> Shutdown
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java
index 9453d8a..4449359 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java
@@ -18,4 +18,4 @@
/**
This package contains files that guide the state transitions from
Init->Running->Shutdown for the datanode.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
index 1122598..39f120a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
@@ -17,4 +17,4 @@
package org.apache.hadoop.ozone.container.common.states.endpoint;
/**
This package contains code for RPC endpoints transitions.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
index 780f874..8daa718 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
@@ -55,4 +55,4 @@ public CompletableFuture<?> cleanUp() {
public Executor getExecutor() {
return executor;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
index a992b2d..2b73042 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
@@ -98,4 +98,4 @@ public void close() throws IOException {
public boolean isClosed() {
return getStore().isClosed();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/package-info.java
index a96c1a9..0202150 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains files related to db use in datanodes.
*/
-package org.apache.hadoop.ozone.container.common.utils.db;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common.utils.db;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
index 08264f0..cae2938 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.container.common.utils;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common.utils;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
index 4f4bd06..a148513 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -1294,4 +1294,4 @@ public String toString() {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
index 626814e..0d55c08 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
@@ -158,4 +158,4 @@ protected void afterDone() {
delegateRef = null;
timer = null;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
index 1d1d4ad..a67e1ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
@@ -139,4 +139,4 @@ public long getTotalCapacity() {
return (getUsed() + getAvailable() + getReserved());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
index 86093c6..7331036 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.container.common.volume;
/**
This package contains volume/ disk related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
index 9a0dac4..a8ce297 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
@@ -199,4 +199,4 @@ public void close() throws IOException {
xceiverClientManager.close();
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
index d4e3f7f..a7e5bfb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
@@ -487,4 +487,4 @@ OptionalLong getTermOfLeaderSCM() {
.map(StateContext::getTermOfLeaderSCM)
.orElse(OptionalLong.empty());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java
index 7267cdc..fe28b9c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinatorTask.java
@@ -22,7 +22,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
import java.util.Objects;
/**
@@ -74,7 +73,7 @@ public void runTask() {
long elapsed = Time.monotonicNow() - start;
LOG.info("Completed {} in {} ms", reconstructionCommandInfo, elapsed);
setStatus(Status.DONE);
- } catch (IOException e) {
+ } catch (Exception e) {
long elapsed = Time.monotonicNow() - start;
LOG.warn("Failed {} after {} ms", reconstructionCommandInfo, elapsed, e);
setStatus(Status.FAILED);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/package-info.java
index 61f4f5c..5b14a50 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.container.ec.reconstruction;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.ec.reconstruction;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 6c99a53..2a83a38 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -399,4 +399,4 @@ private void handleCorruption(IOException e) {
LOG.error("Corruption detected in container [{}]. Marking it UNHEALTHY.",
containerID, e);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 7a4cc58..1c60109 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -215,9 +215,15 @@ public ContainerCommandResponseProto handle(
ContainerCommandRequestProto request, Container container,
DispatcherContext dispatcherContext) {
- return KeyValueHandler
- .dispatchRequest(this, request, (KeyValueContainer) container,
- dispatcherContext);
+ try {
+ return KeyValueHandler
+ .dispatchRequest(this, request, (KeyValueContainer) container,
+ dispatcherContext);
+ } catch (RuntimeException e) {
+ return ContainerUtils.logAndReturnError(LOG,
+ new StorageContainerException(e, CONTAINER_INTERNAL_ERROR),
+ request);
+ }
}
@VisibleForTesting
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java
index 041f485..eeccefe 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.container.keyvalue.helpers;
/**
This package contains utility classes for KeyValue container type.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index d822ce5..d274f03 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -362,4 +362,4 @@ private BlockData getBlockByID(DBHandle db, BlockID blockID,
return blockData;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index 6d19a9f..1267ed7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -78,4 +78,4 @@ public static ChunkManager createChunkManager(ConfigurationSource conf,
return new ChunkManagerDispatcher(sync, manager, volSet);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
index 53c9f1e..4f652ce 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.container.keyvalue;
/**
This package contains classes for KeyValue container type.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
index 69d8042..d66e366 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
index 85b431b..7b53ba6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java
@@ -176,4 +176,4 @@ public static long getContainerId(String key) {
private void setSeparator(String keySeparator) {
separator = keySeparator;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
index f8b8d7d..a89e7a8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
@@ -217,4 +217,4 @@ public void compactionIfNeeded() throws Exception {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
index c99c038..4434a00 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.container.ozoneimpl;
/**
Ozone main that calls into the container layer
-**/
\ No newline at end of file
+**/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index c495593..1174523 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -124,4 +124,4 @@ public static Path getUntarDirectory(HddsVolume hddsVolume)
return Paths.get(hddsVolume.getVolumeRootDir())
.resolve(CONTAINER_COPY_TMP_DIR).resolve(CONTAINER_COPY_DIR);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
index 38a853c..eb6d00d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.container.replication;
/**
Classes to replicate container data between datanodes.
-**/
\ No newline at end of file
+**/
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java
index 1a51012..459528a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java
@@ -20,4 +20,4 @@
/**
* Generic ozone specific classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java
index 3ff7949..2d3896f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SetNodeOperationalStateCommand.java
@@ -86,4 +86,4 @@ public static SetNodeOperationalStateCommand getFromProtobuf(
cmdProto.getNodeOperationalState(),
cmdProto.getStateExpiryEpochSeconds());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 33efc1a..ff25a81 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -125,4 +125,4 @@ public SCMDatanodeResponse processMessage(SCMDatanodeRequest request)
throw new ServiceException(e);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/package-info.java
index acc91fe..43eeb2f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/package-info.java
@@ -19,4 +19,4 @@
/**
* Datanode metadata Testing.
*/
-package org.apache.hadoop.hdds.datanode.metadata;
\ No newline at end of file
+package org.apache.hadoop.hdds.datanode.metadata;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index 3624309..2f8f523 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -364,4 +364,4 @@ private DeletedBlocksTransaction createTestDeleteTxn(long txnID,
return DeletedBlocksTransaction.newBuilder().setTxID(txnID)
.setContainerID(containerID).addAllLocalID(blocks).setCount(0).build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
index 07c78c0..e86212c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
@@ -19,4 +19,4 @@
/**
* Datanode container related test-cases.
*/
-package org.apache.hadoop.ozone.container.common.impl;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common.impl;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
index ca3d29d..f9c99bc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
@@ -19,4 +19,4 @@
/**
* SCM Testing and Mocking Utils.
*/
-package org.apache.hadoop.ozone.container.common;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
index 37615bc..40c35ef 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
@@ -19,4 +19,4 @@
/**
* This package has test cases for all the report publishers which generates
* reports that are sent to SCM via heartbeat.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java
index 8f45118..350be67 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java
@@ -142,4 +142,4 @@ private List<DatanodeDetails> getDatanodes() {
return Arrays.asList(dnOne, dnTwo, dnThree);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
index 1cdadd0..6694b3e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java
@@ -330,4 +330,4 @@ static List<HddsVolume> makeVolumes(
}
return volumes;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
index 3328deb..aff94ac 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
@@ -19,4 +19,4 @@
/**
* Tests for Container Volumes.
*/
-package org.apache.hadoop.ozone.container.common.volume;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common.volume;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
index d977947..e14c67c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
@@ -18,9 +18,11 @@
package org.apache.hadoop.ozone.container.keyvalue;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher;
@@ -34,10 +36,14 @@
import java.io.IOException;
import java.util.UUID;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.SUCCESS;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.DATANODE_UUID;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getDummyCommandRequestProto;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getPutBlockRequest;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestBlockID;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getWriteChunkRequest;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -113,6 +119,24 @@ public void testGetSmallFile() throws IOException {
assertEquals(UNKNOWN_BCSID, response.getResult());
}
+ @Test
+ void testNPEFromPutBlock() throws IOException {
+ KeyValueContainer container = new KeyValueContainer(
+ mock(KeyValueContainerData.class),
+ new OzoneConfiguration());
+ KeyValueHandler subject = getDummyHandler();
+
+ BlockID blockID = getTestBlockID(1);
+ ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+ getWriteChunkRequest(MockPipeline.createSingleNodePipeline(),
+ blockID, 123);
+ ContainerProtos.ContainerCommandResponseProto response =
+ subject.handle(
+ getPutBlockRequest(writeChunkRequest),
+ container, null);
+ assertEquals(CONTAINER_INTERNAL_ERROR, response.getResult());
+ }
+
// -- Helper methods below.
private KeyValueHandler getDummyHandler() throws IOException {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
index afbf274..7bb086f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
@@ -19,4 +19,4 @@
/**
* Chunk Manager Checks.
*/
-package org.apache.hadoop.ozone.container.keyvalue;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.keyvalue;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java
index 3ebfa44..8b8d31d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestMeasuredReplicator.java
@@ -136,4 +136,4 @@ public Instant getQueued() {
// There might be some deviation, so we use >= 1000 here.
Assertions.assertTrue(measuredReplicator.getQueueTime().value() >= 1000);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
index 5c905e0..339d411 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -19,4 +19,4 @@
/**
* Tests for the container replication.
*/
-package org.apache.hadoop.ozone.container.replication;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.replication;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
index 0bfcc4d..6a6a0fb 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
@@ -136,4 +136,4 @@ private String getContent(String name) throws IOException {
private ByteBuf wrap(String content) {
return Unpooled.wrappedBuffer(content.getBytes(StandardCharsets.UTF_8));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestStreamingServer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestStreamingServer.java
index deb4348..d88df82 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestStreamingServer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestStreamingServer.java
@@ -174,4 +174,4 @@ private void streamDir(Path sourceDir, Path destDir, String subdir)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
index 4e8a90b..0d4f6fc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
@@ -15,4 +15,4 @@
* the License.
*/
package org.apache.hadoop.ozone.container.testutils;
-// Helper classes for ozone and container tests.
\ No newline at end of file
+// Helper classes for ozone and container tests.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
index 3339540..306cdb7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -93,4 +93,4 @@ private DatanodeDetails getNewDatanodeDetails() {
.addPort(restPort)
.build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java
index 9bff648..ac9d9b9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java
@@ -109,4 +109,4 @@ private List<DatanodeDetails> getDNDetails(int numDns) {
return dns;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
index b1dfa73..0bfa12a 100644
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -61,7 +61,7 @@
<!-- Checks whether files end with a new line. -->
<!-- See http://checkstyle.sf.net/config_misc.html#NewlineAtEndOfFile -->
- <!-- module name="NewlineAtEndOfFile"/-->
+ <module name="NewlineAtEndOfFile"/>
<!-- Checks that property files contain the same keys. -->
<!-- See http://checkstyle.sf.net/config_misc.html#Translation -->
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java
index afbfa8b..83a5902 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java
@@ -28,4 +28,4 @@
package org.apache.ozone.erasurecode;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawDecoder.java
index 98c5e8e..d22343c 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawDecoder.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawDecoder.java
@@ -60,4 +60,4 @@ public void release() {
public boolean preferDirectBuffer() {
return hadoopNativeRSRawDecoder.preferDirectBuffer();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawEncoder.java
index 62db61e..93dda3e 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawEncoder.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawEncoder.java
@@ -60,4 +60,4 @@ public void release() {
public boolean preferDirectBuffer() {
return hadoopNativeRSRawEncoder.preferDirectBuffer();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java
index b2088ec..6e382de 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java
@@ -50,4 +50,4 @@ public String getCoderName() {
public String getCodecName() {
return ECReplicationConfig.EcCodec.RS.name().toLowerCase();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawDecoder.java
index 96f8581..31a97c7 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawDecoder.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawDecoder.java
@@ -55,4 +55,4 @@ protected void performDecodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
public void release() {
hadoopNativeXORRawDecoder.release();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawEncoder.java
index 91be762..7fec7f2 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawEncoder.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawEncoder.java
@@ -55,4 +55,4 @@ protected void performEncodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
public void release() {
hadoopNativeXORRawEncoder.release();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawErasureCoderFactory.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawErasureCoderFactory.java
index 4a2eb50..06bc70d 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawErasureCoderFactory.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/NativeXORRawErasureCoderFactory.java
@@ -50,4 +50,4 @@ public String getCoderName() {
public String getCodecName() {
return ECReplicationConfig.EcCodec.XOR.name().toLowerCase();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
index 7a0874d..b3554d2 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
@@ -35,4 +35,4 @@
package org.apache.ozone.erasurecode.rawcoder;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/CodecUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/CodecUtil.java
index 4b4ab41..22a98cc 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/CodecUtil.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/CodecUtil.java
@@ -108,4 +108,4 @@ public static RawErasureDecoder createRawDecoderWithFallback(
"Fail to create raw erasure " + "decoder with given codec: "
+ codecName);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java
index b3145bf..dbbb225 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java
@@ -24,4 +24,4 @@
package org.apache.ozone.erasurecode.rawcoder.util;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestCodecRawCoderMapping.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestCodecRawCoderMapping.java
index 5d749ee..c2d5cc9 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestCodecRawCoderMapping.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestCodecRawCoderMapping.java
@@ -65,4 +65,4 @@ public void testXORRawCoder() {
Assert.assertTrue(decoder instanceof XORRawDecoder);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeRSRawCoder.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeRSRawCoder.java
index 2ab038b..1a6ce9e 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeRSRawCoder.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeRSRawCoder.java
@@ -126,4 +126,4 @@ public void testAfterRelease63() throws Exception {
prepare(6, 3, null, null);
testAfterRelease();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeXORRawCoder.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeXORRawCoder.java
index 9312c25..16f1a1f 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeXORRawCoder.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestNativeXORRawCoder.java
@@ -42,4 +42,4 @@ public void testAfterRelease63() throws Exception {
prepare(6, 3, null, null);
testAfterRelease();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 4f82a35..d49e868 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -49,6 +49,10 @@
</dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
+ <artifactId>hdds-managed-rocksdb</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
<artifactId>hdds-hadoop-dependency-server</artifactId>
</dependency>
<dependency>
@@ -110,10 +114,6 @@
</exclusions>
</dependency>
<dependency>
- <groupId>org.rocksdb</groupId>
- <artifactId>rocksdbjni</artifactId>
- </dependency>
- <dependency>
<groupId>io.prometheus</groupId>
<artifactId>simpleclient_dropwizard</artifactId>
</dependency>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
index 948057e..64d21d1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.conf;
\ No newline at end of file
+package org.apache.hadoop.hdds.conf;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/freon/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/freon/package-info.java
index 381c811..945b74c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/freon/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/freon/package-info.java
@@ -21,4 +21,4 @@
/**
* Freon related helper classes used for load testing.
*/
-package org.apache.hadoop.hdds.freon;
\ No newline at end of file
+package org.apache.hadoop.hdds.freon;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
index 7dae0fc..b03ccfa 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains HDDS protocol related classes.
*/
-package org.apache.hadoop.hdds.protocol;
\ No newline at end of file
+package org.apache.hadoop.hdds.protocol;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index bbbad8b..19bae37 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -416,4 +416,4 @@ public long revokeCertificates(List<String> certIds, int reason,
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
index 4496019..ad0676f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java
@@ -19,4 +19,4 @@
package org.apache.hadoop.hdds.protocolPB;
/**
* This package contains classes for wiring HDDS protobuf calls to rpc.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java
new file mode 100644
index 0000000..64ced8d
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeletedBlocksTransactionInfoWrapper.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import java.util.List;
+
+/**
+ * The wrapper for {@link DeletedBlocksTransactionInfo}.
+ */
+public class DeletedBlocksTransactionInfoWrapper {
+
+ private final long txID;
+ private final long containerID;
+ private final List<Long> localIdList;
+ private final int count;
+
+ public DeletedBlocksTransactionInfoWrapper(long txID, long containerID,
+ List<Long> localIdList, int count) {
+ this.txID = txID;
+ this.containerID = containerID;
+ this.localIdList = localIdList;
+ this.count = count;
+ }
+
+ public static DeletedBlocksTransactionInfoWrapper fromProtobuf(
+ DeletedBlocksTransactionInfo txn) {
+ if (txn.hasTxID() && txn.hasContainerID() && txn.hasCount()) {
+ return new DeletedBlocksTransactionInfoWrapper(
+ txn.getTxID(),
+ txn.getContainerID(),
+ txn.getLocalIDList(),
+ txn.getCount());
+ }
+ return null;
+ }
+
+ public static DeletedBlocksTransactionInfo toProtobuf(
+ DeletedBlocksTransactionInfoWrapper wrapper) {
+ return DeletedBlocksTransactionInfo.newBuilder()
+ .setTxID(wrapper.txID)
+ .setContainerID(wrapper.containerID)
+ .addAllLocalID(wrapper.localIdList)
+ .setCount(wrapper.count)
+ .build();
+ }
+
+ public static DeletedBlocksTransactionInfo fromTxn(
+ DeletedBlocksTransaction txn) {
+ return DeletedBlocksTransactionInfo.newBuilder()
+ .setTxID(txn.getTxID())
+ .setContainerID(txn.getContainerID())
+ .addAllLocalID(txn.getLocalIDList())
+ .setCount(txn.getCount())
+ .build();
+ }
+
+ public static DeletedBlocksTransaction toTxn(
+ DeletedBlocksTransactionInfo info) {
+ return DeletedBlocksTransaction.newBuilder()
+ .setTxID(info.getTxID())
+ .setContainerID(info.getContainerID())
+ .addAllLocalID(info.getLocalIDList())
+ .setCount(info.getCount())
+ .build();
+ }
+
+
+ public long getTxID() {
+ return txID;
+ }
+
+ public long getContainerID() {
+ return containerID;
+ }
+
+ public List<Long> getLocalIdList() {
+ return localIdList;
+ }
+
+ public int getCount() {
+ return count;
+ }
+
+ @Override
+ public String toString() {
+ return "DeletedBlocksTransactionInfoWrapper{" +
+ "txID=" + txID +
+ ", containerID=" + containerID +
+ ", localIdList=" + localIdList +
+ ", count=" + count +
+ '}';
+ }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
index ffe0d3d..1e09201 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
@@ -19,4 +19,4 @@
/**
Contains protocol buffer helper classes and utilites used in
impl.
- **/
\ No newline at end of file
+ **/
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
index 3dcf08b..a684494 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
@@ -18,4 +18,4 @@
/**
* Metadata specific package utility for SCM.
*/
-package org.apache.hadoop.hdds.scm.metadata;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.metadata;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 0f7d1c0..c664a42 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -25,12 +25,15 @@
import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.UpgradeFinalizationStatus;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.FinalizeScmUpgradeRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.FinalizeScmUpgradeResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetFailedDeletedBlocksTxnRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetFailedDeletedBlocksTxnResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.QueryUpgradeFinalizationProgressRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.QueryUpgradeFinalizationProgressResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SafeModeRuleStatusProto;
@@ -714,6 +717,21 @@ public void transferLeadership(String nodeId)
}
@Override
+ public List<DeletedBlocksTransactionInfo> getFailedDeletedBlockTxn(int count,
+ long startTxId) throws IOException {
+ GetFailedDeletedBlocksTxnRequestProto request =
+ GetFailedDeletedBlocksTxnRequestProto.newBuilder()
+ .setCount(count)
+ .setStartTxId(startTxId)
+ .build();
+ GetFailedDeletedBlocksTxnResponseProto resp = submitRequest(
+ Type.GetFailedDeletedBlocksTransaction,
+ builder -> builder.setGetFailedDeletedBlocksTxnRequest(request)).
+ getGetFailedDeletedBlocksTxnResponse();
+ return resp.getDeletedBlocksTransactionsList();
+ }
+
+ @Override
public int resetDeletedBlockRetryCount(List<Long> txIDs)
throws IOException {
ResetDeletedBlockRetryCountRequestProto request =
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/package-info.java
index 6fae144..65183f5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/package-info.java
@@ -19,4 +19,4 @@
/**
* CRL client package.
*/
-package org.apache.hadoop.hdds.scm.update.client;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.update.client;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java
index c3b2fb8..07c5867 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java
@@ -19,4 +19,4 @@
/**
* CRL server package.
*/
-package org.apache.hadoop.hdds.scm.update.server;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.update.server;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java
index 833a466..9f335e7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java
@@ -180,4 +180,4 @@ static OzoneSecretKey readProtoBuf(byte[] identifier) throws IOException {
return readProtoBuf(in);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java
index 89a0ab9..53e6400 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java
@@ -175,4 +175,4 @@ private X509ExtendedKeyManager loadKeyManager(CertificateClient caClient)
currentCertId = cert.getSerialNumber().toString();
return keyManager;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
index af53904..b8ba02b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes related to Certificate Life Cycle or Certificate Authority Server.
*/
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.certificate.authority;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultCAProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultCAProfile.java
index 5f6025a..737fa54 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultCAProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultCAProfile.java
@@ -81,4 +81,4 @@ public KeyUsage getKeyUsage() {
| KeyUsage.dataEncipherment | KeyUsage.keyAgreement | KeyUsage.cRLSign
| KeyUsage.keyCertSign);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/package-info.java
index 00dfcb2..6b56691 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/package-info.java
@@ -30,4 +30,4 @@
* An excellent example of a profile would be ozone profile if you would
* like to see a reference to create your own profiles.
*/
-package org.apache.hadoop.hdds.security.x509.certificate.authority.profile;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.certificate.authority.profile;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
index 3b6b520..8b68729 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
@@ -157,4 +157,4 @@ public CertificateSignRequest.Builder getCSRBuilder(KeyPair keyPair)
throw new UnsupportedOperationException("getCSRBuilder of " +
"SCMCertificateClient is not supported currently");
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java
index dea609b..6f67b57 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes related to creating and using certificates.
*/
-package org.apache.hadoop.hdds.security.x509.certificate.client;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.certificate.client;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
index 4971d4a..752df0a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java
@@ -19,4 +19,4 @@
/**
* Certificate Utils.
*/
-package org.apache.hadoop.hdds.security.x509.certificate.utils;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.certificate.utils;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/exception/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/exception/package-info.java
index 6c6393c..a3ffb7f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/exception/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/exception/package-info.java
@@ -20,4 +20,4 @@
/**
* Exceptions thrown by X.509 security classes.
*/
-package org.apache.hadoop.hdds.security.x509.exception;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.exception;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
index 37a04d6..4fffbf7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
@@ -20,4 +20,4 @@
/**
* Utils for private and public keys.
*/
-package org.apache.hadoop.hdds.security.x509.keys;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.keys;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
index a6369c6..33f6071 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
@@ -96,4 +96,4 @@ The CA implementation ( as of now it is called DefaultCA) receives a CSR from
Once the PKI Profile validates the request, it is either auto approved or
queued for manual review.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
index 144cec8..1e1b9bd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
@@ -24,6 +24,7 @@
import java.util.Optional;
import java.util.OptionalInt;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -43,6 +44,7 @@
import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.createDir;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_GROUPS;
@@ -70,6 +72,7 @@ public abstract class BaseHttpServer {
static final String PROMETHEUS_SINK = "PROMETHEUS_SINK";
private static final String JETTY_BASETMPDIR =
"org.eclipse.jetty.webapp.basetempdir";
+ public static final String SERVER_DIR = "/webserver";
private HttpServer2 httpServer;
private final MutableConfigurationSource conf;
@@ -178,14 +181,22 @@ public BaseHttpServer(MutableConfigurationSource conf, String name)
}
String baseDir = conf.get(OzoneConfigKeys.OZONE_HTTP_BASEDIR);
- if (!StringUtils.isEmpty(baseDir)) {
- createDir(baseDir);
- httpServer.getWebAppContext().setAttribute(JETTY_BASETMPDIR, baseDir);
- LOG.info("HTTP server of {} uses base directory {}", name, baseDir);
+
+ if (StringUtils.isEmpty(baseDir)) {
+ baseDir = getOzoneMetaDirPath(conf) + SERVER_DIR;
}
+ createDir(baseDir);
+ httpServer.getWebAppContext().setAttribute(JETTY_BASETMPDIR, baseDir);
+ LOG.info("HTTP server of {} uses base directory {}", name, baseDir);
}
}
+ @VisibleForTesting
+ public String getJettyBaseTmpDir() {
+ return httpServer.getWebAppContext().getAttribute(JETTY_BASETMPDIR)
+ .toString();
+ }
+
/**
* Return a HttpServer.Builder that the OzoneManager/SCM/Datanode/S3Gateway/
* Recon to initialize their HTTP / HTTPS server.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
index 306040d..d2165b9d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
@@ -72,4 +72,4 @@ public static Policy getHttpPolicy(MutableConfigurationSource conf) {
conf.set(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, policy.name());
return policy;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/RatisDropwizardExports.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/RatisDropwizardExports.java
index 834c7d8..813deed 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/RatisDropwizardExports.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/RatisDropwizardExports.java
@@ -135,4 +135,4 @@ void removeFromGlobalRegistration() {
MetricRegistries.global().removeReporterRegistration(reporter, stopper);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/package-info.java
index b8b7cc9..0f17ac7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/package-info.java
@@ -20,4 +20,4 @@
/**
* Servlets and utilities for embedded web server of Ozone services..
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java
index 35ad5e7..ae0eb77 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java
@@ -20,4 +20,4 @@
/**
* Common server side utilities for all the hdds/ozone server components.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
index 3dc1766..d3f3cd1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java
@@ -53,6 +53,11 @@ public static DBCheckpointMetrics create(String parent) {
new DBCheckpointMetrics());
}
+ public void unRegister() {
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ ms.unregisterSource(SOURCE_NAME);
+ }
+
@VisibleForTesting
public void setLastCheckpointCreationTimeTaken(long val) {
this.lastCheckpointCreationTimeTaken.set(val);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java
index 16655cc..5815fc7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java
@@ -35,4 +35,4 @@ public interface DumpFileLoader extends Closeable {
* Close this file loader.
*/
void close();
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java
index a493e6a..eff7039 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringCodec.java
@@ -47,4 +47,4 @@ public String fromPersistedFormat(byte[] rawData) throws IOException {
public String copyObject(String object) {
return object;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java
index ce2c59a..31eee1c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/FixedLengthStringUtils.java
@@ -54,4 +54,4 @@ public static String bytes2String(byte[] bytes) {
"ISO_8859_1 decoding is not supported", e);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java
index 1f88083..3c88cf2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java
@@ -57,4 +57,4 @@ public void load(File externalFile) throws IOException {
@Override
public void close() {
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
index 31b2774..d662dcf 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
@@ -101,4 +101,4 @@ private void closeOnFailure() {
closeResources();
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java
index 7ce7f37..21fefdd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java
@@ -79,4 +79,4 @@ public void cleanupCheckpoint() throws IOException {
checkpointLocation.toString());
FileUtils.deleteDirectory(checkpointLocation.toFile());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
index 8b56bff..c5e7952 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
@@ -19,4 +19,4 @@
/**
* Database interfaces for Ozone.
*/
-package org.apache.hadoop.hdds.utils.db;
\ No newline at end of file
+package org.apache.hadoop.hdds.utils.db;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/package-info.java
index 6e86913..323ba4f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/package-info.java
@@ -24,4 +24,4 @@
package org.apache.hadoop.hdds.utils;
/**
* Generic server side utilities.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/package-info.java
index 6517e58..afcd396 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/common/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.common;
\ No newline at end of file
+package org.apache.hadoop.ozone.common;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
index 6d60a9d..dd8c277 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
@@ -309,4 +309,4 @@ public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
public boolean verifyCert(Certificate certificate) {
return true;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
index 1d20a78..4edcf63 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java
@@ -19,4 +19,4 @@
/**
* Tests for Default CA.
*/
-package org.apache.hadoop.hdds.security.x509.certificate.authority;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.certificate.authority;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
index 109a27e..46192d9 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
@@ -560,4 +560,4 @@ public void testRenewAndStoreKeyAndCertificate() throws Exception {
// a success renew after auto cleanup new key and cert dir
dnCertClient.renewAndStoreKeyAndCertificate(true);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
index 9b31426..7850798 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
@@ -155,4 +155,4 @@ private X509Certificate getX509Certificate() throws Exception {
return KeyStoreTestUtil.generateCertificate(
"CN=Test", keyPair, 365, securityConfig.getSignatureAlgo());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java
index 6b7f032..9b8eaaa 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java
@@ -276,4 +276,4 @@ private X509CertificateHolder generateTestCert()
.build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
index 6b84bd5..d96e942 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
@@ -84,4 +84,4 @@ public void testGenerateKeyWithSize() throws NoSuchProviderException,
((RSAPublicKey)(publicKey)).getModulus().bitLength());
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java
index 190223c..83b9a80 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java
@@ -230,4 +230,4 @@ public void testReadWritePublicKeywithoutArgs()
Assertions.assertNotNull(pubKey);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
index 49e40b4..dc5f1c8 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java
@@ -19,4 +19,4 @@
/**
* Test package for keys used in X.509 env.
*/
-package org.apache.hadoop.hdds.security.x509.keys;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509.keys;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java
index f541468..6fbb202 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java
@@ -19,4 +19,4 @@
/**
* X.509 Certificate and keys related tests.
*/
-package org.apache.hadoop.hdds.security.x509;
\ No newline at end of file
+package org.apache.hadoop.hdds.security.x509;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
index 767941e..473f349 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -151,4 +151,4 @@ public void multipleSubscriber() {
Assertions.assertEquals(23, result[1]);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
index 40b5e96..2a1febe 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
@@ -76,4 +76,4 @@ public void onMessage(FailedNode message, EventPublisher publisher) {
System.out.println("Clear timer");
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
index 720dd6f..2710df3 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
@@ -19,4 +19,4 @@
/**
* Tests for Event Watcher.
*/
-package org.apache.hadoop.hdds.server.events;
\ No newline at end of file
+package org.apache.hadoop.hdds.server.events;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java
index bb49013..0ae1fe7 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java
@@ -106,4 +106,4 @@ protected String getHttpAuthConfigPrefix() {
"default", 65).toString());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestProfileServlet.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestProfileServlet.java
index f12a218..dec8e8c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestProfileServlet.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestProfileServlet.java
@@ -61,4 +61,4 @@ public void testNameValidationWithSlash() {
ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC)));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java
index c3e6746..650a1cb 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisDropwizardExports.java
@@ -64,4 +64,4 @@ public void export() throws IOException {
"Instance name is not moved to be a tag");
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisNameRewrite.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisNameRewrite.java
index 774ccf1..9d4b1fd 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisNameRewrite.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestRatisNameRewrite.java
@@ -103,4 +103,4 @@ public void normalizeRatisMetricName(String originalName, String expectedName,
Assertions.assertEquals(Arrays.asList(expectedTagValues), values);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/package-info.java
index 4f67352..a4e3cc8 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/package-info.java
@@ -20,4 +20,4 @@
/**
* Testing embedded web server of Ozone services..
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestPrometheusMetricsSinkUtil.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestPrometheusMetricsSinkUtil.java
index ef64bbb..a417ecc 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestPrometheusMetricsSinkUtil.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestPrometheusMetricsSinkUtil.java
@@ -226,4 +226,4 @@ void testGetUsername() {
Assertions.assertNull(username);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestUgiMetricsUtil.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestUgiMetricsUtil.java
index 75ee8cd..268bcd7 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestUgiMetricsUtil.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestUgiMetricsUtil.java
@@ -60,4 +60,4 @@ void testCreateServernameTagWithCompatibleKey() {
optionalMetricsTag.get().description());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
index 11b386c..da79bfa 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
@@ -111,4 +111,4 @@ public void readFromFileInvalidConfig() throws IOException {
// This has to return a Null, since we have config defined for badfile.db
Assertions.assertNull(options);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index e7f9d02..bb42943 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -232,4 +232,4 @@ public File getDBLocation(ConfigurationSource conf) {
Assert.assertTrue(checked);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java
index d2ad45b..b6d1e6a 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestFixedLengthStringUtils.java
@@ -42,4 +42,4 @@ public void testStringEncodeAndDecode() {
assertEquals(containerID, decodedContainerID);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 36d00ff..21b1d53 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -371,4 +371,4 @@ public void testDowngrade() throws Exception {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
index f97fda2..3054d6d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java
@@ -19,4 +19,4 @@
/**
* Tests for the DB Cache Utilities.
*/
-package org.apache.hadoop.hdds.utils.db.cache;
\ No newline at end of file
+package org.apache.hadoop.hdds.utils.db.cache;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java
index f1c7ce1..d382059 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java
@@ -19,4 +19,4 @@
/**
* Tests for the DB Utilities.
*/
-package org.apache.hadoop.hdds.utils.db;
\ No newline at end of file
+package org.apache.hadoop.hdds.utils.db;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
index f93e3fd..3f1a394 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/package-info.java
@@ -19,4 +19,4 @@
/**
* DB test Utils.
*/
-package org.apache.hadoop.hdds.utils;
\ No newline at end of file
+package org.apache.hadoop.hdds.utils;
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index 2aac8b2..d5a3c6f 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -79,6 +79,7 @@
optional ReplicationManagerReportRequestProto replicationManagerReportRequest = 40;
optional ResetDeletedBlockRetryCountRequestProto resetDeletedBlockRetryCountRequest = 41;
optional TransferLeadershipRequestProto transferScmLeadershipRequest = 42;
+ optional GetFailedDeletedBlocksTxnRequestProto getFailedDeletedBlocksTxnRequest = 43;
}
message ScmContainerLocationResponse {
@@ -129,6 +130,7 @@
optional ReplicationManagerReportResponseProto getReplicationManagerReportResponse = 40;
optional ResetDeletedBlockRetryCountResponseProto resetDeletedBlockRetryCountResponse = 41;
optional TransferLeadershipResponseProto transferScmLeadershipResponse = 42;
+ optional GetFailedDeletedBlocksTxnResponseProto getFailedDeletedBlocksTxnResponse = 43;
enum Status {
OK = 1;
@@ -178,6 +180,7 @@
ResetDeletedBlockRetryCount = 36;
GetClosedContainerCount = 37;
TransferLeadership = 38;
+ GetFailedDeletedBlocksTransaction = 39;
}
/**
@@ -488,6 +491,16 @@
required ReplicationManagerReportProto report = 1;
}
+message GetFailedDeletedBlocksTxnRequestProto {
+ optional string traceID = 1;
+ required int32 count = 2;
+ optional int64 startTxId = 3;
+}
+
+message GetFailedDeletedBlocksTxnResponseProto {
+ repeated DeletedBlocksTransactionInfo deletedBlocksTransactions = 1;
+}
+
message ResetDeletedBlockRetryCountRequestProto {
optional string traceID = 1;
repeated int64 transactionId = 2;
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 2a07d2d..16ea488 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -458,4 +458,11 @@
}
message TransferLeadershipResponseProto {
+}
+
+message DeletedBlocksTransactionInfo {
+ optional int64 txID = 1;
+ optional int64 containerID = 2;
+ repeated int64 localID = 3;
+ optional int32 count = 4;
}
\ No newline at end of file
diff --git a/hadoop-hdds/managed-rocksdb/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/managed-rocksdb/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..55abc26
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<FindBugsFilter>
+</FindBugsFilter>
diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml
new file mode 100644
index 0000000..47cf941
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/pom.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>hdds</artifactId>
+ <version>1.4.0-SNAPSHOT</version>
+ </parent>
+ <artifactId>hdds-managed-rocksdb</artifactId>
+ <version>1.4.0-SNAPSHOT</version>
+ <description>Apache Ozone Managed RocksDB library</description>
+ <name>Apache Ozone HDDS Managed RocksDB</name>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>hdds-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.rocksdb</groupId>
+ <artifactId>rocksdbjni</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build/>
+</project>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCheckpoint.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCheckpoint.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCheckpoint.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCheckpoint.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksIterator.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksIterator.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksIterator.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksIterator.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java
new file mode 100644
index 0000000..7ba1001
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReader.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import org.rocksdb.SstFileReader;
+
+/**
+ * Managed SstFileReader.
+ */
+public class ManagedSstFileReader extends ManagedObject<SstFileReader> {
+
+ ManagedSstFileReader(SstFileReader original) {
+ super(original);
+ }
+
+ public static ManagedSstFileReader managed(
+ SstFileReader reader) {
+ return new ManagedSstFileReader(reader);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ ManagedRocksObjectUtils.assertClosed(this);
+ super.finalize();
+ }
+
+}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReaderIterator.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReaderIterator.java
new file mode 100644
index 0000000..0916e89
--- /dev/null
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileReaderIterator.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import org.rocksdb.SstFileReaderIterator;
+
+/**
+ * Managed SstFileReaderIterator.
+ */
+public class ManagedSstFileReaderIterator
+ extends ManagedObject<SstFileReaderIterator> {
+
+ ManagedSstFileReaderIterator(SstFileReaderIterator original) {
+ super(original);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ ManagedRocksObjectUtils.assertClosed(this);
+ super.finalize();
+ }
+
+ public static ManagedSstFileReaderIterator managed(
+ SstFileReaderIterator iterator) {
+ return new ManagedSstFileReaderIterator(iterator);
+ }
+
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTransactionLogIterator.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTransactionLogIterator.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTransactionLogIterator.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedTransactionLogIterator.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index e93b621..58ef24a 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -40,6 +40,7 @@
<module>client</module>
<module>common</module>
<module>framework</module>
+ <module>managed-rocksdb</module>
<module>rocksdb-checkpoint-differ</module>
<module>container-service</module>
<module>server-scm</module>
@@ -74,6 +75,12 @@
<dependency>
<groupId>org.apache.ozone</groupId>
+ <artifactId>hdds-managed-rocksdb</artifactId>
+ <version>${hdds.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
<artifactId>hdds-hadoop-dependency-client</artifactId>
<version>${hdds.version}</version>
</dependency>
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
index 842994c..f2a932b 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -37,7 +37,10 @@
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-common</artifactId>
- <version>${hdds.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>hdds-managed-rocksdb</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RelationshipEdge.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RelationshipEdge.java
index 7e63a60..b43ef9f 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RelationshipEdge.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RelationshipEdge.java
@@ -27,4 +27,4 @@ class RelationshipEdge {
public String toString() {
return "";
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index b74de85..e483403 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -35,11 +35,9 @@
import org.rocksdb.DBOptions;
import org.rocksdb.LiveFileMetaData;
import org.rocksdb.Options;
-import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.SstFileReader;
-import org.rocksdb.SstFileReaderIterator;
import org.rocksdb.TableProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -759,7 +757,7 @@ private String getSSTFullPath(String sstFilenameWithoutExtension,
* "/path/to/sstBackupDir/000060.sst"]
*/
public List<String> getSSTDiffListWithFullPath(
- DifferSnapshotInfo src, DifferSnapshotInfo dest) {
+ DifferSnapshotInfo src, DifferSnapshotInfo dest) throws IOException {
List<String> sstDiffList = getSSTDiffList(src, dest);
@@ -780,7 +778,7 @@ public List<String> getSSTDiffListWithFullPath(
* @return A list of SST files without extension. e.g. ["000050", "000060"]
*/
public synchronized List<String> getSSTDiffList(
- DifferSnapshotInfo src, DifferSnapshotInfo dest) {
+ DifferSnapshotInfo src, DifferSnapshotInfo dest) throws IOException {
// TODO: Reject or swap if dest is taken after src, once snapshot chain
// integration is done.
@@ -816,42 +814,26 @@ public synchronized List<String> getSSTDiffList(
}
if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) {
- filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes());
+ filterRelevantSstFilesFullPath(fwdDAGDifferentFiles,
+ src.getTablePrefixes());
}
return new ArrayList<>(fwdDAGDifferentFiles);
}
- public void filterRelevantSstFiles(Set<String> inputFiles,
- Map<String, String> tableToPrefixMap) {
+ /**
+ * construct absolute sst file path first and
+ * filter the files.
+ */
+ public void filterRelevantSstFilesFullPath(Set<String> inputFiles,
+ Map<String, String> tableToPrefixMap) throws IOException {
for (Iterator<String> fileIterator =
inputFiles.iterator(); fileIterator.hasNext();) {
String filename = fileIterator.next();
String filepath = getAbsoluteSstFilePath(filename);
- try (SstFileReader sstFileReader = new SstFileReader(new Options())) {
- sstFileReader.open(filepath);
- TableProperties properties = sstFileReader.getTableProperties();
- String tableName = new String(properties.getColumnFamilyName(), UTF_8);
- if (tableToPrefixMap.containsKey(tableName)) {
- String prefix = tableToPrefixMap.get(tableName);
- SstFileReaderIterator iterator =
- sstFileReader.newIterator(new ReadOptions());
- iterator.seekToFirst();
- String firstKey = RocksDiffUtils
- .constructBucketKey(new String(iterator.key(), UTF_8));
- iterator.seekToLast();
- String lastKey = RocksDiffUtils
- .constructBucketKey(new String(iterator.key(), UTF_8));
- if (!RocksDiffUtils
- .isKeyWithPrefixPresent(prefix, firstKey, lastKey)) {
- fileIterator.remove();
- }
- } else {
- // entry from other tables
- fileIterator.remove();
- }
- } catch (RocksDBException e) {
- e.printStackTrace();
+ if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath,
+ tableToPrefixMap)) {
+ fileIterator.remove();
}
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
index d6d25bb..cca3eaf 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
@@ -18,13 +18,32 @@
package org.apache.ozone.rocksdiff;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator;
+import org.rocksdb.SstFileReader;
+import org.rocksdb.TableProperties;
+import org.rocksdb.Options;
+import org.rocksdb.ReadOptions;
+import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
/**
* Helper methods for snap-diff operations.
*/
public final class RocksDiffUtils {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RocksDiffUtils.class);
+
private RocksDiffUtils() {
}
@@ -35,20 +54,56 @@ public static boolean isKeyWithPrefixPresent(String prefixForColumnFamily,
}
public static String constructBucketKey(String keyName) {
- if (!keyName.startsWith(OzoneConsts.OM_KEY_PREFIX)) {
- keyName = OzoneConsts.OM_KEY_PREFIX.concat(keyName);
+ if (!keyName.startsWith(OM_KEY_PREFIX)) {
+ keyName = OM_KEY_PREFIX.concat(keyName);
}
- String[] elements = keyName.split(OzoneConsts.OM_KEY_PREFIX);
+ String[] elements = keyName.split(OM_KEY_PREFIX);
String volume = elements[1];
String bucket = elements[2];
StringBuilder builder =
- new StringBuilder().append(OzoneConsts.OM_KEY_PREFIX).append(volume);
+ new StringBuilder().append(OM_KEY_PREFIX).append(volume);
if (StringUtils.isNotBlank(bucket)) {
- builder.append(OzoneConsts.OM_KEY_PREFIX).append(bucket);
+ builder.append(OM_KEY_PREFIX).append(bucket);
}
return builder.toString();
}
+ public static void filterRelevantSstFiles(Set<String> inputFiles,
+ Map<String, String> tableToPrefixMap) throws IOException {
+ for (Iterator<String> fileIterator =
+ inputFiles.iterator(); fileIterator.hasNext();) {
+ String filepath = fileIterator.next();
+ if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath,
+ tableToPrefixMap)) {
+ fileIterator.remove();
+ }
+ }
+ }
+
+ public static boolean doesSstFileContainKeyRange(String filepath,
+ Map<String, String> tableToPrefixMap) throws IOException {
+ try (ManagedSstFileReader sstFileReader = ManagedSstFileReader.managed(
+ new SstFileReader(new Options()))) {
+ sstFileReader.get().open(filepath);
+ TableProperties properties = sstFileReader.get().getTableProperties();
+ String tableName = new String(properties.getColumnFamilyName(), UTF_8);
+ if (tableToPrefixMap.containsKey(tableName)) {
+ String prefix = tableToPrefixMap.get(tableName) + OM_KEY_PREFIX;
+ try (ManagedSstFileReaderIterator iterator =
+ ManagedSstFileReaderIterator.managed(sstFileReader.get()
+ .newIterator(new ReadOptions()))) {
+ iterator.get().seek(prefix.getBytes(UTF_8));
+ String seekResultKey = new String(iterator.get().key(), UTF_8);
+ return seekResultKey.startsWith(prefix);
+ }
+ }
+ return false;
+ } catch (RocksDBException e) {
+ LOG.error("Failed to read SST File ", e);
+ throw new IOException(e);
+ }
+ }
+
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/package-info.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/package-info.java
index 1a51012..459528a 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/package-info.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/package-info.java
@@ -20,4 +20,4 @@
/**
* Generic ozone specific classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 871b274..f634222 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -330,7 +330,8 @@ void testDifferWithDB() throws Exception {
/**
* Test SST differ.
*/
- void diffAllSnapshots(RocksDBCheckpointDiffer differ) {
+ void diffAllSnapshots(RocksDBCheckpointDiffer differ)
+ throws IOException {
final DifferSnapshotInfo src = snapshots.get(snapshots.size() - 1);
// Hard-coded expected output.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index 4185670..e485fcc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -71,4 +71,4 @@ List<String> getTransactionIDList(UUID dnId) {
boolean isEmpty() {
return transactions.isEmpty();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index ddd7085..ea20139 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -51,15 +51,17 @@ DatanodeDeletedBlockTransactions getTransactions(int blockDeletionLimit)
throws IOException, TimeoutException;
/**
- * Return all failed transactions in the log. A transaction is considered
- * to be failed if it has been sent more than MAX_RETRY limit and its
- * count is reset to -1.
+ * Return the failed transactions in the log. A transaction is
+ * considered to be failed if it has been sent more than MAX_RETRY limit
+ * and its count is reset to -1.
*
+ * @param count Maximum num of returned transactions, if < 0. return all.
+ * @param startTxId The least transaction id to start with.
* @return a list of failed deleted block transactions.
* @throws IOException
*/
- List<DeletedBlocksTransaction> getFailedTransactions()
- throws IOException;
+ List<DeletedBlocksTransaction> getFailedTransactions(int count,
+ long startTxId) throws IOException;
/**
* Increments count for given list of transactions by 1.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 6903d5e..589a9f3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -92,6 +92,8 @@ public class DeletedBlockLogImpl
private final SequenceIdGenerator sequenceIdGen;
private final ScmBlockDeletingServiceMetrics metrics;
+ private static final int LIST_ALL_FAILED_TRANSACTIONS = -1;
+
@SuppressWarnings("parameternumber")
public DeletedBlockLogImpl(ConfigurationSource conf,
ContainerManager containerManager,
@@ -126,18 +128,27 @@ public DeletedBlockLogImpl(ConfigurationSource conf,
}
@Override
- public List<DeletedBlocksTransaction> getFailedTransactions()
- throws IOException {
+ public List<DeletedBlocksTransaction> getFailedTransactions(int count,
+ long startTxId) throws IOException {
lock.lock();
try {
final List<DeletedBlocksTransaction> failedTXs = Lists.newArrayList();
try (TableIterator<Long,
? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
deletedBlockLogStateManager.getReadOnlyIterator()) {
- while (iter.hasNext()) {
- DeletedBlocksTransaction delTX = iter.next().getValue();
- if (delTX.getCount() == -1) {
- failedTXs.add(delTX);
+ if (count == LIST_ALL_FAILED_TRANSACTIONS) {
+ while (iter.hasNext()) {
+ DeletedBlocksTransaction delTX = iter.next().getValue();
+ if (delTX.getCount() == -1) {
+ failedTXs.add(delTX);
+ }
+ }
+ } else {
+ while (iter.hasNext() && failedTXs.size() < count) {
+ DeletedBlocksTransaction delTX = iter.next().getValue();
+ if (delTX.getCount() == -1 && delTX.getTxID() >= startTxId) {
+ failedTXs.add(delTX);
+ }
}
}
}
@@ -191,7 +202,7 @@ public int resetCount(List<Long> txIDs) throws IOException, TimeoutException {
lock.lock();
try {
if (txIDs == null || txIDs.isEmpty()) {
- txIDs = getFailedTransactions().stream()
+ txIDs = getFailedTransactions(LIST_ALL_FAILED_TRANSACTIONS, 0).stream()
.map(DeletedBlocksTransaction::getTxID)
.collect(Collectors.toList());
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
index ba17fb9..c2e7f2f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
@@ -23,4 +23,4 @@
package org.apache.hadoop.hdds.scm.command;
/*
* Classes related to commands issued from SCM to DataNode.
- * */
\ No newline at end of file
+ * */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index eb3cf7d..4a4d6e1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -408,4 +408,4 @@ protected void deleteReplica(ContainerID containerID, DatanodeDetails dn,
logger.info("Sending delete container command for " + reason +
" container {} to datanode {}", containerID.getId(), dn);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java
index 72c1cda..61e0761 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerMetrics.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hdds.scm.container.balancer;
-import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager.MoveResult;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
@@ -131,8 +130,7 @@ public void incrementNumContainerMovesCompletedInLatestIteration(
}
public void incrementCurrentIterationContainerMoveMetric(
- MoveResult result,
- long valueToAdd) {
+ MoveManager.MoveResult result, long valueToAdd) {
if (result == null) {
return;
}
@@ -145,9 +143,8 @@ public void incrementCurrentIterationContainerMoveMetric(
this.numContainerMovesTimeoutInLatestIteration.incr(valueToAdd);
break;
// TODO: Add metrics for other errors that need to be tracked.
- case FAIL_NOT_RUNNING:
+ case FAIL_LEADER_NOT_READY:
case REPLICATION_FAIL_INFLIGHT_REPLICATION:
- case FAIL_NOT_LEADER:
case REPLICATION_FAIL_NOT_EXIST_IN_SOURCE:
case REPLICATION_FAIL_EXIST_IN_TARGET:
case REPLICATION_FAIL_CONTAINER_NOT_CLOSED:
@@ -157,10 +154,10 @@ public void incrementCurrentIterationContainerMoveMetric(
case REPLICATION_FAIL_NODE_UNHEALTHY:
case DELETION_FAIL_NODE_UNHEALTHY:
case DELETE_FAIL_POLICY:
- case PLACEMENT_POLICY_NOT_SATISFIED:
- case UNEXPECTED_REMOVE_SOURCE_AT_INFLIGHT_REPLICATION:
- case UNEXPECTED_REMOVE_TARGET_AT_INFLIGHT_DELETION:
- case FAIL_CAN_NOT_RECORD_TO_DB:
+ case REPLICATION_NOT_HEALTHY_BEFORE_MOVE:
+ case REPLICATION_NOT_HEALTHY_AFTER_MOVE:
+ case FAIL_CONTAINER_ALREADY_BEING_MOVED:
+ case FAIL_UNEXPECTED_ERROR:
default:
break;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index 234ac78..fc1ddff 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -27,7 +27,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
@@ -110,8 +109,7 @@ public class ContainerBalancerTask implements Runnable {
private Set<DatanodeDetails> selectedSources;
private FindTargetStrategy findTargetStrategy;
private FindSourceStrategy findSourceStrategy;
- private Map<ContainerMoveSelection,
- CompletableFuture<LegacyReplicationManager.MoveResult>>
+ private Map<ContainerMoveSelection, CompletableFuture<MoveManager.MoveResult>>
moveSelectionToFutureMap;
private IterationResult iterationResult;
private int nextIterationIndex;
@@ -756,7 +754,7 @@ private boolean adaptOnReachingIterationLimits() {
private boolean moveContainer(DatanodeDetails source,
ContainerMoveSelection moveSelection) {
ContainerID containerID = moveSelection.getContainerID();
- CompletableFuture<LegacyReplicationManager.MoveResult> future;
+ CompletableFuture<MoveManager.MoveResult> future;
try {
ContainerInfo containerInfo = containerManager.getContainer(containerID);
future = replicationManager
@@ -772,7 +770,7 @@ private boolean moveContainer(DatanodeDetails source,
moveSelection.getTargetNode().getUuidString(), ex);
metrics.incrementNumContainerMovesFailedInLatestIteration(1);
} else {
- if (result == LegacyReplicationManager.MoveResult.COMPLETED) {
+ if (result == MoveManager.MoveResult.COMPLETED) {
sizeActuallyMovedInLatestIteration +=
containerInfo.getUsedBytes();
if (LOG.isDebugEnabled()) {
@@ -805,9 +803,9 @@ private boolean moveContainer(DatanodeDetails source,
if (future.isCompletedExceptionally()) {
return false;
} else {
- LegacyReplicationManager.MoveResult result = future.join();
+ MoveManager.MoveResult result = future.join();
moveSelectionToFutureMap.put(moveSelection, future);
- return result == LegacyReplicationManager.MoveResult.COMPLETED;
+ return result == MoveManager.MoveResult.COMPLETED;
}
} else {
moveSelectionToFutureMap.put(moveSelection, future);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java
index ff8caf6..47bdcf3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/MoveManager.java
@@ -520,4 +520,4 @@ public void opCompleted(ContainerReplicaOp op, ContainerID containerID,
notifyContainerOpCompleted(op, containerID);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
index ee02bbd..14d8f36 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
@@ -20,4 +20,4 @@
* This package has class that close a container. That is move a container from
* open state to close state.
*/
-package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container.closer;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
index 3f8d056..be09927 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains routines to manage the container location and
* mapping inside SCM
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index aaa7855..0ad1f9c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -115,4 +115,4 @@ public long getDatanodeChooseAttemptCount() {
public void getMetrics(MetricsCollector collector, boolean all) {
registry.snapshot(collector.addRecord(registry.info().name()), true);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
index 1cb810d..3b5b0d0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
@@ -15,4 +15,4 @@
* the License.
*/
package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-// Various placement algorithms.
\ No newline at end of file
+// Various placement algorithms.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java
index 9255303..9c23c45 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java
@@ -162,4 +162,4 @@ public String toJsonString() {
return null;
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java
index 4a81d69..26aff18 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java
@@ -17,4 +17,4 @@
package org.apache.hadoop.hdds.scm.container.placement.metrics;
// Various metrics supported by Datanode and used by SCM in the placement
-// strategy.
\ No newline at end of file
+// strategy.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java
index dc54d9b..5a11e25 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java
@@ -16,4 +16,4 @@
*/
package org.apache.hadoop.hdds.scm.container.placement;
-// Classes related to container placement.
\ No newline at end of file
+// Classes related to container placement.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerCheckRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerCheckRequest.java
index d371ff9..9d51e20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerCheckRequest.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerCheckRequest.java
@@ -119,4 +119,4 @@ public ContainerCheckRequest build() {
return new ContainerCheckRequest(this);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/DatanodeCommandCountUpdatedHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/DatanodeCommandCountUpdatedHandler.java
index 87e5928..4cddaaf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/DatanodeCommandCountUpdatedHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/DatanodeCommandCountUpdatedHandler.java
@@ -45,4 +45,4 @@ public void onMessage(DatanodeDetails datanodeDetails,
datanodeDetails);
replicationManager.datanodeCommandCountUpdated(datanodeDetails);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
index 0f922ea..7cdd48c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
@@ -40,6 +40,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
+import org.apache.hadoop.hdds.scm.container.balancer.MoveManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -247,63 +248,12 @@ List<DatanodeDetails> getDatanodeDetails(ContainerID id) {
*/
private final InflightMap inflightDeletion;
-
- /**
- * This is used for indicating the result of move option and
- * the corresponding reason. this is useful for tracking
- * the result of move option
- */
- public enum MoveResult {
- // both replication and deletion are completed
- COMPLETED,
- // RM is not running
- FAIL_NOT_RUNNING,
- // RM is not ratis leader
- FAIL_NOT_LEADER,
- // replication fail because the container does not exist in src
- REPLICATION_FAIL_NOT_EXIST_IN_SOURCE,
- // replication fail because the container exists in target
- REPLICATION_FAIL_EXIST_IN_TARGET,
- // replication fail because the container is not cloesed
- REPLICATION_FAIL_CONTAINER_NOT_CLOSED,
- // replication fail because the container is in inflightDeletion
- REPLICATION_FAIL_INFLIGHT_DELETION,
- // replication fail because the container is in inflightReplication
- REPLICATION_FAIL_INFLIGHT_REPLICATION,
- // replication fail because of timeout
- REPLICATION_FAIL_TIME_OUT,
- // replication fail because of node is not in service
- REPLICATION_FAIL_NODE_NOT_IN_SERVICE,
- // replication fail because node is unhealthy
- REPLICATION_FAIL_NODE_UNHEALTHY,
- // deletion fail because of node is not in service
- DELETION_FAIL_NODE_NOT_IN_SERVICE,
- // replication succeed, but deletion fail because of timeout
- DELETION_FAIL_TIME_OUT,
- // replication succeed, but deletion fail because because
- // node is unhealthy
- DELETION_FAIL_NODE_UNHEALTHY,
- // replication succeed, but if we delete the container from
- // the source datanode , the policy(eg, replica num or
- // rack location) will not be satisfied, so we should not delete
- // the container
- DELETE_FAIL_POLICY,
- // replicas + target - src does not satisfy placement policy
- PLACEMENT_POLICY_NOT_SATISFIED,
- //unexpected action, remove src at inflightReplication
- UNEXPECTED_REMOVE_SOURCE_AT_INFLIGHT_REPLICATION,
- //unexpected action, remove target at inflightDeletion
- UNEXPECTED_REMOVE_TARGET_AT_INFLIGHT_DELETION,
- //write DB error
- FAIL_CAN_NOT_RECORD_TO_DB
- }
-
/**
* This is used for tracking container move commands
* which are not yet complete.
*/
private final Map<ContainerID,
- CompletableFuture<MoveResult>> inflightMoveFuture;
+ CompletableFuture<MoveManager.MoveResult>> inflightMoveFuture;
/**
* ReplicationManager specific configuration.
@@ -428,6 +378,7 @@ protected void processContainer(ContainerInfo container,
* we have to resend close container command to the datanodes.
*/
if (state == LifeCycleState.CLOSING) {
+ setHealthStateForClosing(replicas, container, report);
for (ContainerReplica replica: replicas) {
if (replica.getState() != State.UNHEALTHY) {
sendCloseCommand(
@@ -697,14 +648,20 @@ private void updateMoveIfNeeded(final boolean isUnhealthy,
//but inflightMoveFuture not. so there will be a case that
//container is in inflightMove, but not in inflightMoveFuture.
compleleteMoveFutureWithResult(id,
- MoveResult.UNEXPECTED_REMOVE_SOURCE_AT_INFLIGHT_REPLICATION);
+ MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR);
+ LOG.info("Move failed because replication for container {} " +
+ "unexpectedly happened at the source {}, not the target {}.",
+ container, kv.getSrc().getUuidString(), kv.getTgt().getUuidString());
moveScheduler.completeMove(id.getProtobuf());
return;
}
if (isTarget && !isInflightReplication) {
compleleteMoveFutureWithResult(id,
- MoveResult.UNEXPECTED_REMOVE_TARGET_AT_INFLIGHT_DELETION);
+ MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR);
+ LOG.info("Move failed because deletion for container {} unexpectedly " +
+ "happened at the target {}, not the source {}.", container,
+ kv.getTgt().getUuidString(), kv.getSrc().getUuidString());
moveScheduler.completeMove(id.getProtobuf());
return;
}
@@ -713,27 +670,26 @@ private void updateMoveIfNeeded(final boolean isUnhealthy,
if (isInflightReplication) {
if (isUnhealthy) {
compleleteMoveFutureWithResult(id,
- MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
} else if (isNotInService) {
compleleteMoveFutureWithResult(id,
- MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
} else {
compleleteMoveFutureWithResult(id,
- MoveResult.REPLICATION_FAIL_TIME_OUT);
+ MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT);
}
} else {
if (isUnhealthy) {
compleleteMoveFutureWithResult(id,
- MoveResult.DELETION_FAIL_NODE_UNHEALTHY);
+ MoveManager.MoveResult.DELETION_FAIL_NODE_UNHEALTHY);
} else if (isTimeout) {
compleleteMoveFutureWithResult(id,
- MoveResult.DELETION_FAIL_TIME_OUT);
+ MoveManager.MoveResult.DELETION_FAIL_TIME_OUT);
} else if (isNotInService) {
compleleteMoveFutureWithResult(id,
- MoveResult.DELETION_FAIL_NODE_NOT_IN_SERVICE);
+ MoveManager.MoveResult.DELETION_FAIL_NODE_NOT_IN_SERVICE);
} else {
- compleleteMoveFutureWithResult(id,
- MoveResult.COMPLETED);
+ compleleteMoveFutureWithResult(id, MoveManager.MoveResult.COMPLETED);
}
}
moveScheduler.completeMove(id.getProtobuf());
@@ -750,7 +706,7 @@ private void updateMoveIfNeeded(final boolean isUnhealthy,
* @param src source datanode
* @param tgt target datanode
*/
- public CompletableFuture<MoveResult> move(ContainerID cid,
+ public CompletableFuture<MoveManager.MoveResult> move(ContainerID cid,
DatanodeDetails src, DatanodeDetails tgt)
throws ContainerNotFoundException, NodeNotFoundException,
TimeoutException {
@@ -763,13 +719,13 @@ public CompletableFuture<MoveResult> move(ContainerID cid,
* @param cid Container to move
* @param mp MoveDataNodePair which contains source and target datanodes
*/
- private CompletableFuture<MoveResult> move(ContainerID cid,
+ private CompletableFuture<MoveManager.MoveResult> move(ContainerID cid,
MoveDataNodePair mp) throws ContainerNotFoundException,
NodeNotFoundException, TimeoutException {
- CompletableFuture<MoveResult> ret = new CompletableFuture<>();
+ CompletableFuture<MoveManager.MoveResult> ret = new CompletableFuture<>();
if (!scmContext.isLeader()) {
- ret.complete(MoveResult.FAIL_NOT_LEADER);
+ ret.complete(MoveManager.MoveResult.FAIL_LEADER_NOT_READY);
return ret;
}
@@ -797,11 +753,15 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
NodeOperationalState operationalState =
currentNodeStat.getOperationalState();
if (healthStat != NodeState.HEALTHY) {
- ret.complete(MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ LOG.info("Failing move for container {} because source {} is {}", cid,
+ srcDn.getUuidString(), healthStat.toString());
return ret;
}
if (operationalState != NodeOperationalState.IN_SERVICE) {
- ret.complete(MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ LOG.info("Failing move for container {} because source {} is {}", cid,
+ srcDn.getUuidString(), operationalState.toString());
return ret;
}
@@ -809,11 +769,15 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
healthStat = currentNodeStat.getHealth();
operationalState = currentNodeStat.getOperationalState();
if (healthStat != NodeState.HEALTHY) {
- ret.complete(MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ LOG.info("Failing move for container {} because target {} is {}", cid,
+ targetDn.getUuidString(), healthStat.toString());
return ret;
}
if (operationalState != NodeOperationalState.IN_SERVICE) {
- ret.complete(MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ LOG.info("Failing move for container {} because target {} is {}", cid,
+ targetDn.getUuidString(), operationalState.toString());
return ret;
}
@@ -828,11 +792,12 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
.map(ContainerReplica::getDatanodeDetails)
.collect(Collectors.toSet());
if (replicas.contains(targetDn)) {
- ret.complete(MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET);
+ ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET);
return ret;
}
if (!replicas.contains(srcDn)) {
- ret.complete(MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE);
+ ret.complete(
+ MoveManager.MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE);
return ret;
}
@@ -845,11 +810,12 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
* */
if (inflightReplication.containsKey(cid)) {
- ret.complete(MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION);
+ ret.complete(
+ MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION);
return ret;
}
if (inflightDeletion.containsKey(cid)) {
- ret.complete(MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION);
+ ret.complete(MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION);
return ret;
}
@@ -864,7 +830,8 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
LifeCycleState currentContainerStat = cif.getState();
if (currentContainerStat != LifeCycleState.CLOSED) {
- ret.complete(MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
+ ret.complete(
+ MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
return ret;
}
@@ -872,7 +839,7 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
// satisfies current placement policy
if (!isPolicySatisfiedAfterMove(cif, srcDn, targetDn,
new ArrayList<>(currentReplicas))) {
- ret.complete(MoveResult.PLACEMENT_POLICY_NOT_SATISFIED);
+ ret.complete(MoveManager.MoveResult.REPLICATION_NOT_HEALTHY_AFTER_MOVE);
return ret;
}
@@ -880,8 +847,8 @@ private CompletableFuture<MoveResult> move(ContainerID cid,
moveScheduler.startMove(cid.getProtobuf(),
mp.getProtobufMessage(ClientVersion.CURRENT_VERSION));
} catch (IOException e) {
- LOG.warn("Exception while starting move {}", cid);
- ret.complete(MoveResult.FAIL_CAN_NOT_RECORD_TO_DB);
+ LOG.warn("Exception while starting move for container {}", cid, e);
+ ret.complete(MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR);
return ret;
}
@@ -1358,7 +1325,7 @@ private void deleteSrcDnForMove(final ContainerInfo cif,
.anyMatch(r -> r.getDatanodeDetails().equals(srcDn))) {
// if the target is present but source disappears somehow,
// we can consider move is successful.
- compleleteMoveFutureWithResult(cid, MoveResult.COMPLETED);
+ compleleteMoveFutureWithResult(cid, MoveManager.MoveResult.COMPLETED);
moveScheduler.completeMove(cid.getProtobuf());
return;
}
@@ -1382,7 +1349,8 @@ private void deleteSrcDnForMove(final ContainerInfo cif,
// we just complete the future without sending a delete command.
LOG.info("can not remove source replica after successfully " +
"replicated to target datanode");
- compleleteMoveFutureWithResult(cid, MoveResult.DELETE_FAIL_POLICY);
+ compleleteMoveFutureWithResult(cid,
+ MoveManager.MoveResult.DELETE_FAIL_POLICY);
moveScheduler.completeMove(cid.getProtobuf());
}
}
@@ -1613,6 +1581,18 @@ private boolean isOpenContainerHealthy(
.allMatch(r -> compareState(state, r.getState()));
}
+ private void setHealthStateForClosing(Set<ContainerReplica> replicas,
+ ContainerInfo container,
+ ReplicationManagerReport report) {
+ if (replicas.size() == 0) {
+ report.incrementAndSample(HealthState.MISSING, container.containerID());
+ report.incrementAndSample(HealthState.UNDER_REPLICATED,
+ container.containerID());
+ report.incrementAndSample(HealthState.MIS_REPLICATED,
+ container.containerID());
+ }
+ }
+
public boolean isContainerReplicatingOrDeleting(ContainerID containerID) {
return inflightReplication.containsKey(containerID) ||
inflightDeletion.containsKey(containerID);
@@ -1682,7 +1662,8 @@ DatanodeDetails getFirstDatanode(InflightType type, ContainerID id) {
return getInflightMap(type).get(id).get(0).getDatanode();
}
- public Map<ContainerID, CompletableFuture<MoveResult>> getInflightMove() {
+ public Map<ContainerID, CompletableFuture<MoveManager.MoveResult>>
+ getInflightMove() {
return inflightMoveFuture;
}
@@ -1926,9 +1907,10 @@ private void onLeaderReadyAndOutOfSafeMode() {
/**
* complete the CompletableFuture of the container in the given Map with
- * a given MoveResult.
+ * the given MoveManager.MoveResult.
*/
- private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr) {
+ private void compleleteMoveFutureWithResult(ContainerID cid,
+ MoveManager.MoveResult mr) {
if (inflightMoveFuture.containsKey(cid)) {
inflightMoveFuture.get(cid).complete(mr);
inflightMoveFuture.remove(cid);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index cfb6aa7..9833d7a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.hdds.scm.container.balancer.MoveManager;
import org.apache.hadoop.hdds.scm.container.replication.health.MismatchedReplicasHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.ClosedWithUnhealthyReplicasHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.ClosingContainerHandler;
@@ -1127,22 +1128,23 @@ public ReplicationManagerConfiguration getConfig() {
/**
* following functions will be refactored in a separate jira.
*/
- public CompletableFuture<LegacyReplicationManager.MoveResult> move(
+ public CompletableFuture<MoveManager.MoveResult> move(
ContainerID cid, DatanodeDetails src, DatanodeDetails tgt)
throws NodeNotFoundException, ContainerNotFoundException,
TimeoutException {
- CompletableFuture<LegacyReplicationManager.MoveResult> ret =
+ CompletableFuture<MoveManager.MoveResult> ret =
new CompletableFuture<>();
if (!isRunning()) {
- ret.complete(LegacyReplicationManager.MoveResult.FAIL_NOT_RUNNING);
+ ret.complete(MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR);
+ LOG.warn("Failing move because Replication Monitor thread's " +
+ "running state is {}", isRunning());
return ret;
}
return legacyReplicationManager.move(cid, src, tgt);
}
- public Map<ContainerID,
- CompletableFuture<LegacyReplicationManager.MoveResult>>
+ public Map<ContainerID, CompletableFuture<MoveManager.MoveResult>>
getInflightMove() {
return legacyReplicationManager.getInflightMove();
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ClosingContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ClosingContainerHandler.java
index c06581b..2ff21bb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ClosingContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ClosingContainerHandler.java
@@ -69,4 +69,4 @@ public boolean handle(ContainerCheckRequest request) {
}
return true;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
index 6da647b..7612987 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
@@ -97,4 +97,4 @@ public boolean handle(ContainerCheckRequest request) {
});
return true;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisUnhealthyReplicationCheckHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisUnhealthyReplicationCheckHandler.java
index 9c034ee..26668c5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisUnhealthyReplicationCheckHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisUnhealthyReplicationCheckHandler.java
@@ -193,4 +193,4 @@ protected ContainerHealthResult checkReplication(
return new ContainerHealthResult.UnHealthyResult(
replicaCount.getContainer());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
index 934b01e..a640f3e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
@@ -20,4 +20,4 @@
/**
* HDDS (Closed) Container replicaton related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/report/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/report/package-info.java
index 8f539b1..91b9dae 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/report/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/report/package-info.java
@@ -16,4 +16,4 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.container.report;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container.report;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java
index e4e8ed3..863c98b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java
@@ -77,4 +77,4 @@ public String toString() {
", pipelineID=" + pipelineID +
'}';
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
index 8ad1c8b..3329eb7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
@@ -19,4 +19,4 @@
/**
* Container States package.
*/
-package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container.states;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
index 46181a3..e4632e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
@@ -20,4 +20,4 @@
* Events Package contains all the Events used by SCM internally to
* communicate between different sub-systems that make up SCM.
*/
-package org.apache.hadoop.hdds.scm.events;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.events;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
index 928123f..99a021e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
@@ -108,4 +108,4 @@ public void stop() {
isStarted.set(false);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
index 71cdd92..8dd4182 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
@@ -123,4 +123,4 @@ private Object invokeRatis(Method method, Object[] args)
throw response.getException();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java
index 28366e6..c9c51b8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java
@@ -256,4 +256,4 @@ public GrpcTlsConfig getGrpcTlsConfig() {
return null;
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java
index a4bac3d..933e0f5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java
@@ -39,4 +39,4 @@ public interface SCMSnapshotDownloader extends Closeable {
* @throws IOException
*/
CompletableFuture<Path> download(Path destination) throws IOException;
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueueReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueueReportHandler.java
index a6d9ea5..18cbb12 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueueReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueueReportHandler.java
@@ -47,4 +47,4 @@ public void onMessage(CommandQueueReportFromDatanode queueReportFromDatanode,
queueReportFromDatanode.getReport(),
queueReportFromDatanode.getCommandsToBeSent());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index 528d2df..0263078 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -477,4 +477,4 @@ private NodeStatus getNodeStatus(DatanodeDetails dnd)
return nodeManager.getNodeStatus(dnd);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidNodeStateException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidNodeStateException.java
index 9c82398..accfac3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidNodeStateException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/InvalidNodeStateException.java
@@ -31,4 +31,4 @@ public InvalidNodeStateException(String msg) {
public InvalidNodeStateException(String msg, Exception e) {
super(msg, e);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
index 68cb3f4..84a4119 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
@@ -230,4 +230,4 @@ public int compareTo(NodeStatus o) {
return order;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 7a62088..791c530 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -1324,4 +1324,4 @@ public HDDSLayoutVersionManager getLayoutVersionManager() {
public void forceNodesToHealthyReadOnly() {
nodeStateManager.forceNodesToHealthyReadOnly();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index ed45ed0..93dfa8c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -365,4 +365,4 @@ public enum ReportStatus {
FAILED_AND_OUT_OF_SPACE_STORAGE
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java
index 6535ae4..48dbbe8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java
@@ -66,4 +66,4 @@ public void onMessage(DatanodeDetails datanodeDetails,
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
index 0b63ceb..d0572a0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
@@ -84,4 +84,4 @@ StorageReportResult build() {
return new StorageReportResult(status, fullVolumes, failedVolumes);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java
index d6a8ad0..782b973 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java
@@ -28,4 +28,4 @@
* The container manager polls the node manager to learn the state of
* datanodes that it is interested in.
* <p/>
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
index c429c5c..2e54703 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
@@ -19,4 +19,4 @@
/**
* Node States package.
*/
-package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.node.states;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index b1c3032..60b3850 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -212,4 +212,4 @@ void reinitialize(Table<PipelineID, Pipeline> pipelineStore)
* Release write lock.
*/
void releaseWriteLock();
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java
index 7ca0b7d..3ae5a4d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java
@@ -15,4 +15,4 @@
* the License.
*/
package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
-// Various pipeline choosing algorithms.
\ No newline at end of file
+// Various pipeline choosing algorithms.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
index 51adc88..a23a567 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
@@ -21,4 +21,4 @@
That means that we can have a replication pipeline build on
Ratis, Simple or some other protocol. All Pipeline managers
the entities in charge of pipelines reside in the package.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
index 9d5d72a..182e4e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
@@ -408,4 +408,4 @@ private void setRootCAIfNeeded(SCMGetCertResponseProto.Builder builder)
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index e550a9b..2dc3c5b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -60,6 +60,8 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetExistContainerWithPipelinesInBatchRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetExistContainerWithPipelinesInBatchResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetFailedDeletedBlocksTxnRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetFailedDeletedBlocksTxnResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesRequestProto;
@@ -657,6 +659,14 @@ public ScmContainerLocationResponse processRequest(
request.getGetContainerReplicasRequest(),
request.getVersion()))
.build();
+ case GetFailedDeletedBlocksTransaction:
+ return ScmContainerLocationResponse.newBuilder()
+ .setCmdType(request.getCmdType())
+ .setStatus(Status.OK)
+ .setGetFailedDeletedBlocksTxnResponse(getFailedDeletedBlocksTxn(
+ request.getGetFailedDeletedBlocksTxnRequest()
+ ))
+ .build();
case ResetDeletedBlockRetryCount:
return ScmContainerLocationResponse.newBuilder()
.setCmdType(request.getCmdType())
@@ -1176,6 +1186,15 @@ public GetContainerCountResponseProto getClosedContainerCount(
.build();
}
+ public GetFailedDeletedBlocksTxnResponseProto getFailedDeletedBlocksTxn(
+ GetFailedDeletedBlocksTxnRequestProto request) throws IOException {
+ long startTxId = request.hasStartTxId() ? request.getStartTxId() : 0;
+ return GetFailedDeletedBlocksTxnResponseProto.newBuilder()
+ .addAllDeletedBlocksTransactions(
+ impl.getFailedDeletedBlockTxn(request.getCount(), startTxId))
+ .build();
+ }
+
public ResetDeletedBlockRetryCountResponseProto
getResetDeletedBlockRetryCount(ResetDeletedBlockRetryCountRequestProto
request) throws IOException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
index 411f22e..fe53e36 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.hdds.scm.protocol;
/**
* RPC/protobuf specific translator classes for SCM protocol.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java
index 5f516e4..b46611f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java
@@ -38,4 +38,4 @@ private SafeModeRestrictedOps() {
public static boolean isRestrictedInSafeMode(ScmOps opName) {
return restrictedOps.contains(opName);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index c73b2f0..ccd4153 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.hdds.protocol.ReconfigureProtocol;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB;
@@ -48,6 +49,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.common.helpers.DeletedBlocksTransactionInfoWrapper;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancer;
import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration;
@@ -846,6 +848,26 @@ public void transferLeadership(String newLeaderId)
}
}
+ public List<DeletedBlocksTransactionInfo> getFailedDeletedBlockTxn(int count,
+ long startTxId) throws IOException {
+ List<DeletedBlocksTransactionInfo> result;
+ try {
+ result = scm.getScmBlockManager().getDeletedBlockLog()
+ .getFailedTransactions(count, startTxId).stream()
+ .map(DeletedBlocksTransactionInfoWrapper::fromTxn)
+ .collect(Collectors.toList());
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.GET_FAILED_DELETED_BLOCKS_TRANSACTION, null));
+ return result;
+ } catch (IOException ex) {
+ AUDIT.logReadFailure(
+ buildAuditMessageForFailure(
+ SCMAction.GET_FAILED_DELETED_BLOCKS_TRANSACTION, null, ex)
+ );
+ throw ex;
+ }
+ }
+
@Override
public int resetDeletedBlockRetryCount(List<Long> txIDs) throws IOException {
Map<String, String> auditMap = Maps.newHashMap();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
index f037be6..7f627bf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
@@ -38,4 +38,4 @@ boolean init(OzoneConfiguration conf, String clusterId)
boolean bootStrap(OzoneConfiguration conf)
throws IOException, AuthenticationException;
String generateClusterId();
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
index 5c4e15a..9165787 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
@@ -197,4 +197,4 @@ public String generateClusterId() {
return StorageInfo.newClusterID();
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
index fe07272..21e66e6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
@@ -19,4 +19,4 @@
* permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.server;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.server;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java
index ac76b65..08af379 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmOnFinalizeActionForDatanodeSchemaV2.java
@@ -42,4 +42,4 @@ public void execute(SCMUpgradeFinalizationContext context) throws Exception {
LOG.info("Executing SCM On Finalize action for layout feature {}",
DATANODE_SCHEMA_V2);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java
index c3b2fb8..07c5867 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/update/server/package-info.java
@@ -19,4 +19,4 @@
/**
* CRL server package.
*/
-package org.apache.hadoop.hdds.scm.update.server;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.update.server;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
index 68d6903..57a45b1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
@@ -175,4 +175,4 @@ public void testScmDataNodeBindHostDefault() {
assertEquals(200, addr.getPort());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 4446631..eb244f3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -56,7 +56,8 @@ public class TestStorageContainerManagerHttpServer {
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
- base.mkdirs();
+ File ozoneMetadataDirectory = new File(BASEDIR, "metadata");
+ ozoneMetadataDirectory.mkdirs();
conf = new OzoneConfiguration();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(
@@ -68,6 +69,8 @@ public static void setUp() throws Exception {
KeyStoreTestUtil.getClientSSLConfigFileName());
conf.set(OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
KeyStoreTestUtil.getServerSSLConfigFileName());
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ ozoneMetadataDirectory.getAbsolutePath());
}
@AfterAll
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
index a67df69..5041bdd 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
@@ -20,4 +20,4 @@
/**
* Make checkstyle happy.
* */
-package org.apache.hadoop.hdds.scm.block;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.block;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
index f529c20..a5a5f2f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.hdds.scm.command;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.command;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 22e01b9..4a4afac 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -403,4 +403,4 @@ public Boolean isNodeRegistered(DatanodeDetails datanodeDetails) {
return false;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
index a5bbe3c..69a8649 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
@@ -65,4 +65,4 @@ public void testCloseContainerAction() {
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 469b1dd..693c61a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -945,4 +945,4 @@ protected static ContainerReportsProto getContainerReportsProto(
return crBuilder.addReports(replicaProto).build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 1b5f6a3..8697a97 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -183,4 +183,4 @@ private ContainerInfo allocateContainer()
return containerInfo;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index c7928b2..d62e7f8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -526,4 +526,4 @@ public void testECReplicaIndexValidation() throws NodeNotFoundException,
replicaMap.put(dns.get(0), 2);
testReplicaIndexUpdate(container, dns.get(0), 2, replicaMap);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
index 84fbfc1..dfe8d30 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
@@ -178,4 +178,4 @@ private static ContainerReportsProto getContainerReportsProto(
return crBuilder.addReports(replicaProto).build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
index fb40561..1435bc3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
@@ -37,7 +37,6 @@
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager.MoveResult;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMService;
@@ -156,7 +155,8 @@ public void setup() throws IOException, NodeNotFoundException,
Mockito.when(replicationManager.move(Mockito.any(ContainerID.class),
Mockito.any(DatanodeDetails.class),
Mockito.any(DatanodeDetails.class)))
- .thenReturn(CompletableFuture.completedFuture(MoveResult.COMPLETED));
+ .thenReturn(CompletableFuture.
+ completedFuture(MoveManager.MoveResult.COMPLETED));
when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class)))
.thenAnswer(invocationOnMock -> {
@@ -721,7 +721,7 @@ public void checkIterationResult()
Mockito.any(DatanodeDetails.class),
Mockito.any(DatanodeDetails.class)))
.thenReturn(CompletableFuture.completedFuture(
- MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY));
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY));
balancerConfiguration.setMaxSizeToMovePerIteration(10 * STORAGE_UNIT);
startBalancer(balancerConfiguration);
@@ -774,12 +774,12 @@ public void checkIterationResultTimeoutFromReplicationManager()
throws NodeNotFoundException, IOException,
IllegalContainerBalancerStateException,
InvalidContainerBalancerConfigurationException, TimeoutException {
- CompletableFuture<MoveResult> future
+ CompletableFuture<MoveManager.MoveResult> future
= CompletableFuture.supplyAsync(() ->
- MoveResult.REPLICATION_FAIL_TIME_OUT);
- CompletableFuture<MoveResult> future2
+ MoveManager.MoveResult.REPLICATION_FAIL_TIME_OUT);
+ CompletableFuture<MoveManager.MoveResult> future2
= CompletableFuture.supplyAsync(() ->
- MoveResult.DELETION_FAIL_TIME_OUT);
+ MoveManager.MoveResult.DELETION_FAIL_TIME_OUT);
Mockito.when(replicationManager.move(Mockito.any(ContainerID.class),
Mockito.any(DatanodeDetails.class),
Mockito.any(DatanodeDetails.class)))
@@ -806,14 +806,15 @@ public void checkIterationResultException()
InvalidContainerBalancerConfigurationException,
TimeoutException {
- CompletableFuture<MoveResult> f = new CompletableFuture();
- f.completeExceptionally(new RuntimeException("Runtime Exception"));
+ CompletableFuture<MoveManager.MoveResult> future =
+ new CompletableFuture<>();
+ future.completeExceptionally(new RuntimeException("Runtime Exception"));
Mockito.when(replicationManager.move(Mockito.any(ContainerID.class),
Mockito.any(DatanodeDetails.class),
Mockito.any(DatanodeDetails.class)))
.thenThrow(new ContainerNotFoundException("Test Container not found"),
new NodeNotFoundException("Test Node not found"))
- .thenReturn(f).thenReturn(CompletableFuture.supplyAsync(() -> {
+ .thenReturn(future).thenReturn(CompletableFuture.supplyAsync(() -> {
try {
Thread.sleep(200);
} catch (Exception ex) {
@@ -1025,7 +1026,7 @@ private void stopBalancer() {
// do nothing as testcase is not threaded
}
- private CompletableFuture<MoveResult>
+ private CompletableFuture<MoveManager.MoveResult>
genCompletableFuture(int sleepMilSec) {
return CompletableFuture.supplyAsync(() -> {
try {
@@ -1033,7 +1034,7 @@ private void stopBalancer() {
} catch (InterruptedException e) {
e.printStackTrace();
}
- return MoveResult.COMPLETED;
+ return MoveManager.MoveResult.COMPLETED;
});
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
index 2f35719..f4f357e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle happy.
*/
-package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container.closer;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
index f93aea6..137e865 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.hdds.scm.container;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
index 732cf20..5d17f13 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
@@ -231,4 +231,4 @@ public void testClassNotImplemented() {
ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true, null)
);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index fb169c7..1182e1b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -164,4 +164,4 @@ public void chooseDatanodes() throws SCMException {
Assertions.assertTrue(selectedCount.get(datanodes.get(4)) < selectedCount
.get(datanodes.get(6)));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index b7c152b..1481928 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -224,4 +224,4 @@ public void testIsValidNode() throws SCMException {
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java
index 6afb362..fa1e474 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java
@@ -41,12 +41,12 @@
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager;
import org.apache.hadoop.hdds.scm.container.TestContainerManagerImpl;
+import org.apache.hadoop.hdds.scm.container.balancer.MoveManager;
import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager.LegacyReplicationManagerConfiguration;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
-import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager.MoveResult;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
@@ -400,6 +400,62 @@ public void testClosingContainer() throws IOException, TimeoutException {
Assertions.assertEquals(1, report.getStat(LifeCycleState.CLOSING));
}
+ /**
+ * Create closing container with 1 replica.
+ * Expectation: Missing containers 0.
+ * Remove the only replica.
+ * Expectation: Missing containers 1.
+ */
+ @Test
+ public void testClosingMissingContainer()
+ throws IOException, TimeoutException {
+ final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
+ final ContainerID id = container.containerID();
+
+ containerStateManager.addContainer(container.getProtobuf());
+
+ // One replica in OPEN state
+ final Set<ContainerReplica> replicas = getReplicas(id, State.OPEN,
+ randomDatanodeDetails());
+
+ for (ContainerReplica replica : replicas) {
+ containerStateManager.updateContainerReplica(id, replica);
+ }
+
+ final int currentCloseCommandCount = datanodeCommandHandler
+ .getInvocationCount(SCMCommandProto.Type.closeContainerCommand);
+
+ replicationManager.processAll();
+ eventQueue.processAll(1000);
+ Assertions.assertEquals(currentCloseCommandCount + 1,
+ datanodeCommandHandler.getInvocationCount(
+ SCMCommandProto.Type.closeContainerCommand));
+
+ ReplicationManagerReport report = replicationManager.getContainerReport();
+ Assertions.assertEquals(1, report.getStat(LifeCycleState.CLOSING));
+ Assertions.assertEquals(0, report.getStat(
+ ReplicationManagerReport.HealthState.MISSING));
+
+ for (ContainerReplica replica : replicas) {
+ containerStateManager.removeContainerReplica(id, replica);
+ }
+
+ replicationManager.processAll();
+ eventQueue.processAll(1000);
+ Assertions.assertEquals(currentCloseCommandCount + 1,
+ datanodeCommandHandler.getInvocationCount(
+ SCMCommandProto.Type.closeContainerCommand));
+
+ report = replicationManager.getContainerReport();
+ Assertions.assertEquals(1, report.getStat(LifeCycleState.CLOSING));
+ Assertions.assertEquals(1, report.getStat(
+ ReplicationManagerReport.HealthState.MISSING));
+ Assertions.assertEquals(1, report.getStat(
+ ReplicationManagerReport.HealthState.UNDER_REPLICATED));
+ Assertions.assertEquals(1, report.getStat(
+ ReplicationManagerReport.HealthState.MIS_REPLICATED));
+ }
+
@Test
public void testReplicateCommandTimeout()
throws IOException, TimeoutException {
@@ -1793,7 +1849,7 @@ public void testMove() throws IOException, NodeNotFoundException,
addReplica(container,
new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
DatanodeDetails dn3 = addNode(new NodeStatus(IN_SERVICE, HEALTHY));
- CompletableFuture<MoveResult> cf =
+ CompletableFuture<MoveManager.MoveResult> cf =
replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(scmLogs.getOutput().contains(
"receive a move request about container"));
@@ -1818,7 +1874,8 @@ public void testMove() throws IOException, NodeNotFoundException,
replicationManager.processAll();
eventQueue.processAll(1000);
- Assertions.assertTrue(cf.isDone() && cf.get() == MoveResult.COMPLETED);
+ Assertions.assertTrue(
+ cf.isDone() && cf.get() == MoveManager.MoveResult.COMPLETED);
}
/**
@@ -1933,7 +1990,7 @@ public void testMoveNotDeleteSrcIfPolicyNotSatisfied()
addReplica(container,
new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
DatanodeDetails dn4 = addNode(new NodeStatus(IN_SERVICE, HEALTHY));
- CompletableFuture<MoveResult> cf =
+ CompletableFuture<MoveManager.MoveResult> cf =
replicationManager.move(id, dn1.getDatanodeDetails(), dn4);
Assertions.assertTrue(scmLogs.getOutput().contains(
"receive a move request about container"));
@@ -1958,7 +2015,7 @@ public void testMoveNotDeleteSrcIfPolicyNotSatisfied()
dn1.getDatanodeDetails()));
Assertions.assertTrue(cf.isDone() &&
- cf.get() == MoveResult.DELETE_FAIL_POLICY);
+ cf.get() == MoveManager.MoveResult.DELETE_FAIL_POLICY);
}
@@ -1978,7 +2035,7 @@ public void testDnBecameUnhealthyWhenMoving() throws IOException,
addReplica(container,
new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
DatanodeDetails dn3 = addNode(new NodeStatus(IN_SERVICE, HEALTHY));
- CompletableFuture<MoveResult> cf =
+ CompletableFuture<MoveManager.MoveResult> cf =
replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(scmLogs.getOutput().contains(
"receive a move request about container"));
@@ -1988,7 +2045,7 @@ public void testDnBecameUnhealthyWhenMoving() throws IOException,
eventQueue.processAll(1000);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, HEALTHY));
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
@@ -2001,7 +2058,7 @@ public void testDnBecameUnhealthyWhenMoving() throws IOException,
eventQueue.processAll(1000);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.DELETION_FAIL_NODE_UNHEALTHY);
+ MoveManager.MoveResult.DELETION_FAIL_NODE_UNHEALTHY);
}
/**
@@ -2023,7 +2080,7 @@ public void testMovePrerequisites() throws IOException,
ContainerReplica dn4 = addReplica(container,
new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
- CompletableFuture<MoveResult> cf;
+ CompletableFuture<MoveManager.MoveResult> cf;
//the above move is executed successfully, so there may be some item in
//inflightReplication or inflightDeletion. here we stop replication
// manager to clear these states, which may impact the tests below.
@@ -2032,26 +2089,26 @@ public void testMovePrerequisites() throws IOException,
Thread.sleep(100L);
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.FAIL_NOT_RUNNING);
+ MoveManager.MoveResult.FAIL_UNEXPECTED_ERROR);
replicationManager.start();
Thread.sleep(100L);
//container is not in OPEN state
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
+ MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
//open -> closing
containerStateManager.updateContainerState(id.getProtobuf(),
LifeCycleEvent.FINALIZE);
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
+ MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
//closing -> quasi_closed
containerStateManager.updateContainerState(id.getProtobuf(),
LifeCycleEvent.QUASI_CLOSE);
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
+ MoveManager.MoveResult.REPLICATION_FAIL_CONTAINER_NOT_CLOSED);
//quasi_closed -> closed
containerStateManager.updateContainerState(id.getProtobuf(),
@@ -2066,10 +2123,10 @@ public void testMovePrerequisites() throws IOException,
new NodeStatus(IN_SERVICE, state));
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
cf = replicationManager.move(id, dn3, dn1.getDatanodeDetails());
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_UNHEALTHY);
}
}
nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, HEALTHY));
@@ -2082,10 +2139,10 @@ public void testMovePrerequisites() throws IOException,
new NodeStatus(state, HEALTHY));
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
cf = replicationManager.move(id, dn3, dn1.getDatanodeDetails());
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
+ MoveManager.MoveResult.REPLICATION_FAIL_NODE_NOT_IN_SERVICE);
}
}
nodeManager.setNodeStatus(dn3, new NodeStatus(IN_SERVICE, HEALTHY));
@@ -2094,12 +2151,12 @@ public void testMovePrerequisites() throws IOException,
cf = replicationManager.move(id, dn1.getDatanodeDetails(),
dn2.getDatanodeDetails());
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET);
+ MoveManager.MoveResult.REPLICATION_FAIL_EXIST_IN_TARGET);
//container does not exist in source datanode
cf = replicationManager.move(id, dn3, dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE);
+ MoveManager.MoveResult.REPLICATION_FAIL_NOT_EXIST_IN_SOURCE);
//make container over replicated to test the
// case that container is in inflightDeletion
@@ -2110,7 +2167,7 @@ public void testMovePrerequisites() throws IOException,
eventQueue.processAll(1000);
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION);
+ MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_DELETION);
resetReplicationManager();
//make the replica num be 2 to test the case
@@ -2123,7 +2180,7 @@ public void testMovePrerequisites() throws IOException,
eventQueue.processAll(1000);
cf = replicationManager.move(id, dn1.getDatanodeDetails(), dn3);
Assertions.assertTrue(cf.isDone() && cf.get() ==
- MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION);
+ MoveManager.MoveResult.REPLICATION_FAIL_INFLIGHT_REPLICATION);
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
index 1423c99..aa74ea7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
@@ -20,4 +20,4 @@
* SCM Testing and Mocking Utils.
*/
package org.apache.hadoop.hdds.scm.container.replication;
-// Test classes for Replication functionality.
\ No newline at end of file
+// Test classes for Replication functionality.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/report/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/report/package-info.java
index d2c6677..e94ad56 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/report/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/report/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.hdds.scm.container.report;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container.report;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
index b8141b2..41de9d1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
@@ -135,4 +135,4 @@ public void tesUpdate() throws SCMException {
Assertions.assertThrows(SCMException.class,
() -> containerAttribute.update(key3, key1, id));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
index 795dfc1..2b55c8f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.container.states;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 327ae26..32e2c2a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -505,4 +505,4 @@ private DatanodeDetails getFirstTrackedNode() {
return
monitor.getTrackedNodes().toArray(new DatanodeDetails[0])[0];
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 6851252..7922aa1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -323,4 +323,4 @@ private List<DatanodeDetails> generateDatanodes() {
return dns;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
index 2cd2689..da5912d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
@@ -422,4 +422,4 @@ public Event getLastEvent() {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
index dfd8397..bf067af 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.hdds.scm.node;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.node;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
index 277cb0d..cba96d3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -320,4 +320,4 @@ public void testProcessReportDetectNewAndMissingContainers() throws
Assertions.assertTrue(result.getNewEntries().removeAll(insertedSet),
"All inserted containers are not found.");
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
index 48b27e4..30c027a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
@@ -184,4 +184,4 @@ private DatanodeDetails generateDatanode() {
return DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
index 6610fcd..d513be2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
@@ -20,4 +20,4 @@
/**
* Test Node2Container Map.
*/
-package org.apache.hadoop.hdds.scm.node.states;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.node.states;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
index dd5cf9b..5ca4447 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
@@ -320,4 +320,4 @@ public void releaseWriteLock() {
public boolean isPipelineCreationFrozen() {
return false;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
index 6418cad..6ac0b53 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
@@ -68,4 +68,4 @@ public void testCloseActionForMissingPipeline()
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
index 7b69a9c..91a12b2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
@@ -532,4 +532,4 @@ private void deactivatePipeline(HddsProtos.Pipeline pipeline)
stateManager.updatePipelineState(pipeline.getId(),
HddsProtos.PipelineState.PIPELINE_DORMANT);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
index 1888c1c..8f7975c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
@@ -153,4 +153,4 @@ public void testCreatePipelineWithNodes() throws IOException {
Pipeline.PipelineState.OPEN);
Assertions.assertEquals(pipeline.getNodes().size(), factor.getNumber());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/pipeline/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/pipeline/package-info.java
index f685b17..2d0e94f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/pipeline/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/pipeline/package-info.java
@@ -19,4 +19,4 @@
/**
* Package info tests.
*/
-package org.apache.hadoop.hdds.scm.pipeline;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.pipeline;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index 20c1a78..f177226 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -743,4 +743,4 @@ public void onMessage(SCMSafeModeManager.SafeModeStatus safeModeStatus,
isInSafeMode.set(safeModeStatus.isInSafeMode());
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 7379d50..7c7a15d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -147,4 +147,4 @@ public void testSortDatanodes() throws Exception {
resp.getNodeList().stream().forEach(
node -> System.out.println(node.getNetworkName()));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
index 21df4bd..5480caa 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
@@ -62,4 +62,4 @@ public void testStart() throws IOException {
public void testStop() {
securityProtocolServer.stop();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
index a7a5395..ff1da5c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
@@ -200,4 +200,4 @@ public String generateClusterId() {
return "static-cluster-id";
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java
index 7a11a3a..7e2d529 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java
@@ -69,4 +69,4 @@ public void testStartupSlvLessThanMlv(@TempDir Path tempDir)
mlv, largestSlv);
Assertions.assertEquals(expectedMessage, t.getMessage());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
index da2ae84..f61555e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.ozone.container.common;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
index ddd751c..935d1d8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
@@ -19,4 +19,4 @@
/**
* Make CheckStyle Happy.
*/
-package org.apache.hadoop.ozone.container.placement;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.placement;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
index 4e8a90b..0d4f6fc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
@@ -15,4 +15,4 @@
* the License.
*/
package org.apache.hadoop.ozone.container.testutils;
-// Helper classes for ozone and container tests.
\ No newline at end of file
+// Helper classes for ozone and container tests.
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java
index d81e971..158bc6d 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java
@@ -90,4 +90,4 @@ public void execute(ScmClient scmClient) throws IOException {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index d72ed7b..22050f1 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -470,6 +471,13 @@ public void transferLeadership(String newLeaderId) throws IOException {
}
@Override
+ public List<DeletedBlocksTransactionInfo> getFailedDeletedBlockTxn(int count,
+ long startTxId) throws IOException {
+ return storageContainerLocationClient.getFailedDeletedBlockTxn(count,
+ startTxId);
+ }
+
+ @Override
public int resetDeletedBlockRetryCount(List<Long> txIDs) throws IOException {
return storageContainerLocationClient.resetDeletedBlockRetryCount(txIDs);
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
index 4714761..babcabe 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
@@ -192,4 +192,4 @@ private void printNodesWithLocation(Collection<HddsProtos.Node> nodes) {
node.getNodeID().getNetworkLocation() : "NA"));
});
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java
index 5271051..6177c8f 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java
@@ -70,4 +70,4 @@ public void execute(SCMSecurityProtocol client) throws IOException {
throw new IOException("Fail to get certificate id " + serialId, ex);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/package-info.java
index 3541194..c1e4cbe 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/package-info.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/package-info.java
@@ -19,4 +19,4 @@
/**
* Contains all of the SCM CA certificate related commands.
*/
-package org.apache.hadoop.hdds.scm.cli.cert;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.cli.cert;
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java
index ff8adbc..1a16786 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java
@@ -19,4 +19,4 @@
/**
* Contains all of the container related scm commands.
*/
-package org.apache.hadoop.hdds.scm.cli.container;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.cli.container;
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/package-info.java
index f4c45cf..b750dd4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/package-info.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/package-info.java
@@ -19,4 +19,4 @@
/**
* Contains all of the datanode related scm commands.
*/
-package org.apache.hadoop.hdds.scm.cli.datanode;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.cli.datanode;
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java
index d358b3c..6b819a4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java
@@ -20,4 +20,4 @@
/**
* SCM related cli tools.
*/
-package org.apache.hadoop.hdds.scm.cli;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.cli;
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
index 88c771e..b64673b 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
@@ -28,11 +28,14 @@
import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.server.JsonUtils;
import picocli.CommandLine;
import java.io.IOException;
+import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;
+import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
@@ -68,6 +71,11 @@ public class ListPipelinesSubcommand extends ScmSubcommand {
defaultValue = "")
private String state;
+ @CommandLine.Option(names = { "--json" },
+ defaultValue = "false",
+ description = "Format output as JSON")
+ private boolean json;
+
@Override
public void execute(ScmClient scmClient) throws IOException {
Optional<Predicate<? super Pipeline>> replicationFilter =
@@ -81,7 +89,14 @@ public void execute(ScmClient scmClient) throws IOException {
stream = stream.filter(p -> p.getPipelineState().toString()
.compareToIgnoreCase(state) == 0);
}
- stream.forEach(System.out::println);
+
+ if (json) {
+ List<Pipeline> pipelineList = stream.collect(Collectors.toList());
+ System.out.print(
+ JsonUtils.toJsonStringWithDefaultPrettyPrinter(pipelineList));
+ } else {
+ stream.forEach(System.out::println);
+ }
}
private Optional<Predicate<? super Pipeline>> getReplicationFilter() {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java
index 64924d1..70e3057 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java
@@ -19,4 +19,4 @@
/**
* Contains all of the pipeline related scm commands.
*/
-package org.apache.hadoop.hdds.scm.cli.pipeline;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.cli.pipeline;
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/cert/TestCleanExpired.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/cert/TestCleanExpired.java
index b169e63..dc5705f 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/cert/TestCleanExpired.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/cert/TestCleanExpired.java
@@ -97,4 +97,4 @@ public void testOnlyExpiredCertsRemoved()
cmd.removeExpiredCertificates(dbStore);
Mockito.verify(iterator, Mockito.times(1)).removeFromDB();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
index d35fa79..3c3d1bb 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
@@ -140,4 +140,4 @@ private List<HddsProtos.Node> getNodeDetails() {
}
return nodes;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
index c7e6306..63b260f 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
@@ -132,4 +132,4 @@ public void testErrorsReportedWhenEnteringMaintenance() throws IOException {
assertTrue(m.find());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index 60665c7f..7d6aac8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -528,4 +528,4 @@ private List<OzoneBucket> getNextListOfBuckets(String prevBucket) {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java
index 9345072..25d74e9 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains Ozone Client classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 0097078..e94074e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -452,8 +452,11 @@ public void hflush() throws IOException {
@Override
public void hsync() throws IOException {
checkNotClosed();
+ final long hsyncPos = writeOffset;
handleFlushOrClose(StreamAction.HSYNC);
- blockOutputStreamEntryPool.hsyncKey(offset);
+ Preconditions.checkState(offset >= hsyncPos,
+ "offset = %s < hsyncPos = %s", offset, hsyncPos);
+ blockOutputStreamEntryPool.hsyncKey(hsyncPos);
}
/**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
index 7e2591a..8f04add 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains Ozone Client classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
index 23b181f..68ff353 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
@@ -226,4 +226,4 @@ private OzoneBucket getOzoneBucket() throws IOException {
volume.createBucket(bucketName);
return volume.getBucket(bucketName);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
index 7e896f3..32cbe4d 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
@@ -97,4 +97,4 @@ private AbstractBlockChecksumComputer buildBlockChecksumComputer(
return new ReplicatedBlockChecksumComputer(chunkInfoList);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
index 3954bb9..4cb4a6b 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
@@ -370,4 +370,4 @@ public void testPutKeyChecksum() throws IOException {
assertEquals(1, helper.getKeyLocationInfoList().size());
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java
index 2ed26bf..f61083f 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains test classes for Ozone Client checksum APIs.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java
index be63eab..35dcd2b 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains test classes for Ozone Client.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/RpcClientTest.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/RpcClientTest.java
index edddad9..19aa3a7 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/RpcClientTest.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/RpcClientTest.java
@@ -229,4 +229,4 @@ public void testFutureVersionShouldNotBeAnExpectedVersion() {
IllegalArgumentException.class,
() -> validateOmVersion(OzoneManagerVersion.FUTURE_VERSION, null));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
index 3f597ab..1304f71 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
@@ -49,4 +49,4 @@ public void getKeyProvider() {
"not configured.");
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java
index b73153f..03a8344 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java
@@ -37,4 +37,4 @@ public class OzoneIllegalArgumentException extends IllegalArgumentException {
public OzoneIllegalArgumentException(final String message) {
super(message);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
index ece1ff4..db6df61 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
@@ -19,4 +19,4 @@
/**
* IO related ozone helper classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/OMClientConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/OMClientConfig.java
index cbae089..387e036 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/OMClientConfig.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/OMClientConfig.java
@@ -81,4 +81,4 @@ public void setRpcTimeOut(long timeOut) {
}
this.rpcTimeOut = timeOut;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
index 3e7ec6a..715e42c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
@@ -19,4 +19,4 @@
/**
* Package contains classes related to ozone configuration.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
index d2867c4..ab13874 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.ha;
/**
This package contains HA related code.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
index a95f09f..62cee81 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains Ozone Client's OM Proxy classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java
index c180138..f4b2452 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java
@@ -80,4 +80,4 @@ public BucketEncryptionKeyInfo build() {
return new BucketEncryptionKeyInfo(version, suite, keyName);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java
index 4da8d2b..db07a19 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java
@@ -51,4 +51,4 @@ public static List<KeyValue> toProtobuf(Map<String, String> keyValueMap) {
}
return metadataList;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
index 01d4f38..bd379c2 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
@@ -87,4 +87,4 @@ public static String smProtoToString(StateMachineLogEntryProto proto) {
}
return builder.toString();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyRenameInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyRenameInfo.java
new file mode 100644
index 0000000..bb6001d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyRenameInfo.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyRenameInfo;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Argument for renamedKeyTable. Stores a {@code List<String>} representing
+ * all the renames that happened to a particular key between snapshots.
+ */
+public class OmKeyRenameInfo {
+ private List<String> keyNamesList;
+
+ public OmKeyRenameInfo(List<String> keyNamesList) {
+ this.keyNamesList = keyNamesList;
+ }
+
+ public OmKeyRenameInfo(String keyRenameInfo) {
+ this.keyNamesList = new ArrayList<>();
+ this.keyNamesList.add(keyRenameInfo);
+ }
+
+ public void addOmKeyRenameInfo(String keyRenameInfo) {
+ this.keyNamesList.add(keyRenameInfo);
+ }
+
+ public List<String> getOmKeyRenameInfoList() {
+ return keyNamesList;
+ }
+
+ public List<String> cloneOmKeyRenameInfoList() {
+ return new ArrayList<>(keyNamesList);
+ }
+
+
+ public static OmKeyRenameInfo getFromProto(KeyRenameInfo
+ keyRenameInfo) throws IOException {
+ List<String> list = new ArrayList<>(keyRenameInfo.getKeyNamesList());
+ return new OmKeyRenameInfo.Builder().setOmKeyRenameList(list).build();
+ }
+
+ public KeyRenameInfo getProto() {
+ List<String> list = new ArrayList<>(cloneOmKeyRenameInfoList());
+
+ KeyRenameInfo.Builder builder = KeyRenameInfo.newBuilder()
+ .addAllKeyNames(list);
+ return builder.build();
+ }
+
+ public OmKeyRenameInfo copyObject() {
+ return new OmKeyRenameInfo(new ArrayList<>(keyNamesList));
+ }
+
+ /**
+ * Builder of OmKeyRenameInfo.
+ */
+ public static class Builder {
+ private List<String> keyNamesList;
+
+ public Builder() { }
+
+ public OmKeyRenameInfo.Builder setOmKeyRenameList(List<String> stringList) {
+ this.keyNamesList = stringList;
+ return this;
+ }
+
+ public OmKeyRenameInfo build() {
+ return new OmKeyRenameInfo(keyNamesList);
+ }
+ }
+
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
index b1211d8..9a44755 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
@@ -15,4 +15,4 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.helpers;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java
index 5feac5f..f7e626d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes related to ozone manager lock.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/OzoneTenantRolePrincipal.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/OzoneTenantRolePrincipal.java
index baf500c..39cb48b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/OzoneTenantRolePrincipal.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/OzoneTenantRolePrincipal.java
@@ -38,4 +38,4 @@ public String toString() {
public String getName() {
return tenantRoleName;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
index 1744cff..532111a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.om;
/**
This package contains client side protocol library to communicate with OM.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
index 9c7f388..9229fb0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
@@ -16,4 +16,4 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.om.protocol;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.protocol;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
index d595edf..ee76d5c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
@@ -16,4 +16,4 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.om.protocolPB;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.protocolPB;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java
index 69d94b6..53b5703 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes related to ozone REST interface.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
index 5c572ef..cb98d53 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes related to ozone Ozone ACL.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java
index 457f891..b395a15 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.security;
/**
* Ozone security related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/package-info.java
index 774beea..6a2c3ba 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/package-info.java
@@ -19,4 +19,4 @@
/**
* Ozone snapshot related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
index 7bc89c1..7a9df01 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
@@ -19,4 +19,4 @@
/**
* Ozone utilities.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java
index f321da2..d13ae1d 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java
@@ -36,4 +36,4 @@ public void from() {
Assert.assertEquals("dir1/key1", info.getKeyName());
Assert.assertEquals("uploadId", info.getUploadId());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
index fab4f73..91e33e2 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
@@ -149,4 +149,4 @@ public void testFromProtobufOp() {
objInfo = getBuilder(null, null, key).build();
assertEquals(objInfo.getKeyName(), key);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java
index 1b558dd..d18d0d5 100644
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java
+++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java
@@ -19,4 +19,4 @@
/**
* Container Storage Interface server implementation for Ozone.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index b5aeb81..f834279 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -76,6 +76,7 @@
share/ozone/lib/hdds-interface-admin.jar
share/ozone/lib/hdds-interface-client.jar
share/ozone/lib/hdds-interface-server.jar
+share/ozone/lib/hdds-managed-rocksdb.jar
share/ozone/lib/hdds-server-framework.jar
share/ozone/lib/hdds-server-scm.jar
share/ozone/lib/hdds-tools.jar
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/package-info.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/package-info.java
index e93958a..2d9509b 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/package-info.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/package-info.java
@@ -16,4 +16,4 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.failure;
\ No newline at end of file
+package org.apache.hadoop.ozone.failure;
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java
index fb585ef..313c1c9 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java
@@ -46,4 +46,4 @@ public void initialize() throws Exception {
fsBucket.createDirectory(keyName);
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java
index 6ca1900..90a62de 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java
@@ -54,4 +54,4 @@ public void generateLoad() throws Exception {
public void initialize() throws Exception {
// Nothing to do here
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java
index 029148e..1d77743 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java
@@ -42,4 +42,4 @@ public void generateLoad() throws Exception {
public void initialize() {
// Nothing to do here
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties
index 9eebeae..20d2262 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties
@@ -37,4 +37,4 @@
log4j.appender.PROBLEM.layout=org.apache.log4j.PatternLayout
log4j.appender.PROBLEM.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
-log4j.additivity.org.apache.hadoop.ozone.utils=false
\ No newline at end of file
+log4j.additivity.org.apache.hadoop.ozone.utils=false
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java
index 97dd495..918844d 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java
@@ -20,4 +20,4 @@
/**
* Insight points for the ozone datanodes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java
index c0dfc4d..da976f4 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java
@@ -20,4 +20,4 @@
/**
* Insight points for the Ozone Manager.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java
index a77524d..bc352d9 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java
@@ -21,4 +21,4 @@
/**
* Framework to collect log/metrics and configuration for specified ozone
* components.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java
index 0966fbd..e804161 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java
@@ -20,4 +20,4 @@
/**
* Insight points for the Storage Container Manager.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
index b6f897d..d8943b3 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
@@ -69,4 +69,4 @@ public String getDescription() {
"This a log specific to [datanode=456] [pipeline=abcd]"));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
index ef8a190..0934f4a 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
@@ -99,4 +99,4 @@ public void setClientAddress(String clientAddress) {
this.clientAddress = clientAddress;
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
index 7f30966..4bce517 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
@@ -38,4 +38,4 @@ public void filterLog() {
+ "storageType: DISK\\n failed: false\\n}\\n</json>");
Assert.assertEquals(10, result.split("\n").length);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 3e5ba5a..16d968c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -20,9 +20,14 @@
import java.io.Closeable;
import java.io.IOException;
+import java.security.GeneralSecurityException;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.crypto.Encryptor;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -38,6 +43,9 @@
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.tag.Flaky;
@@ -46,6 +54,8 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT;
@@ -57,12 +67,16 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
/**
* Test HSync.
*/
@Timeout(value = 300)
public class TestHSync {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestHSync.class);
private static MiniOzoneCluster cluster;
private static OzoneBucket bucket;
@@ -112,10 +126,11 @@ public void testO3fsHSync() throws Exception {
OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
- final Path file = new Path("/file");
-
try (FileSystem fs = FileSystem.get(CONF)) {
- runTestHSync(fs, file);
+ for (int i = 0; i < 10; i++) {
+ final Path file = new Path("/file" + i);
+ runTestHSync(fs, file, 1 << i);
+ }
}
}
@@ -129,17 +144,20 @@ public void testOfsHSync() throws Exception {
final String dir = OZONE_ROOT + bucket.getVolumeName()
+ OZONE_URI_DELIMITER + bucket.getName();
- final Path file = new Path(dir, "file");
try (FileSystem fs = FileSystem.get(CONF)) {
- runTestHSync(fs, file);
+ for (int i = 0; i < 10; i++) {
+ final Path file = new Path(dir, "file" + i);
+ runTestHSync(fs, file, 1 << i);
+ }
}
}
- static void runTestHSync(FileSystem fs, Path file) throws Exception {
+ static void runTestHSync(FileSystem fs, Path file, int initialDataSize)
+ throws Exception {
try (StreamWithLength out = new StreamWithLength(
fs.create(file, true))) {
- runTestHSync(fs, file, out, 1);
+ runTestHSync(fs, file, out, initialDataSize);
for (int i = 1; i < 5; i++) {
for (int j = -1; j <= 1; j++) {
int dataSize = (1 << (i * 5)) + j;
@@ -177,6 +195,8 @@ static void runTestHSync(FileSystem fs, Path file,
StreamWithLength out, int dataSize)
throws Exception {
final long length = out.getLength();
+ LOG.info("runTestHSync {} with size {}, skipLength={}",
+ file, dataSize, length);
final byte[] data = new byte[dataSize];
ThreadLocalRandom.current().nextBytes(data);
out.writeAndHsync(data);
@@ -219,6 +239,8 @@ public void testStreamCapability() throws Exception {
assertTrue(os.hasCapability(StreamCapabilities.HSYNC),
"KeyOutputStream should support hsync()!");
}
+
+ testEncryptedStreamCapabilities(false);
}
@Test
@@ -251,5 +273,34 @@ public void testECStreamCapability() throws Exception {
assertFalse(os.hasCapability(StreamCapabilities.HSYNC),
"ECKeyOutputStream should not support hsync()!");
}
+ testEncryptedStreamCapabilities(true);
+ }
+
+ private void testEncryptedStreamCapabilities(boolean isEC) throws IOException,
+ GeneralSecurityException {
+ KeyOutputStream kos;
+ if (isEC) {
+ kos = mock(ECKeyOutputStream.class);
+ } else {
+ kos = mock(KeyOutputStream.class);
+ }
+ CryptoCodec codec = mock(CryptoCodec.class);
+ when(codec.getCipherSuite()).thenReturn(CipherSuite.AES_CTR_NOPADDING);
+ when(codec.getConf()).thenReturn(CONF);
+ Encryptor encryptor = mock(Encryptor.class);
+ when(codec.createEncryptor()).thenReturn(encryptor);
+ CryptoOutputStream cos =
+ new CryptoOutputStream(kos, codec, new byte[0], new byte[0]);
+ OzoneOutputStream oos = new OzoneOutputStream(cos);
+ OzoneFSOutputStream ofso = new OzoneFSOutputStream(oos);
+
+ try (CapableOzoneFSOutputStream cofsos =
+ new CapableOzoneFSOutputStream(ofso)) {
+ if (isEC) {
+ assertFalse(cofsos.hasCapability(StreamCapabilities.HFLUSH));
+ } else {
+ assertTrue(cofsos.hasCapability(StreamCapabilities.HFLUSH));
+ }
+ }
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
index e40b22e..1af6b87 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
@@ -44,4 +44,4 @@ public static void teardownCluster() throws IOException {
protected AbstractFSContract createContract(Configuration conf) {
return new OzoneContract(conf);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
index 51284c2..cc64907 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -19,4 +19,4 @@
/**
* Ozone FS Contract tests.
*/
-package org.apache.hadoop.fs.ozone;
\ No newline at end of file
+package org.apache.hadoop.fs.ozone;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index 67cccf5..6e3bfc4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -125,4 +125,4 @@ public void testPipelineWithScmRestart()
Assertions.assertEquals(ratisPipeline1.getId(),
containerInfo.getPipelineID());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
index f685b17..2d0e94f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
@@ -19,4 +19,4 @@
/**
* Package info tests.
*/
-package org.apache.hadoop.hdds.scm.pipeline;
\ No newline at end of file
+package org.apache.hadoop.hdds.scm.pipeline;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index 17696fb..dd594c0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -203,4 +203,4 @@ private void waitForRatis1NodePipelines(int numPipelines)
Pipeline.PipelineState.OPEN)
.size() == numPipelines, 100, 60000);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java
index 8663c72..677fd99 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java
@@ -119,4 +119,4 @@ public void testContainerBalancerCLIOperations() throws Exception {
}
//TODO: add more acceptance after container balancer is fully completed
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 89a17f7..33f3e9d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -357,7 +357,7 @@ public void testBlockDeletionTransactions() throws Exception {
cluster.getStorageContainerManager().getScmHAManager()
.asSCMHADBTransactionBuffer().flush();
}
- return delLog.getFailedTransactions().size() == 0;
+ return delLog.getFailedTransactions(-1, 0).size() == 0;
} catch (IOException e) {
return false;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index 6c69fa7..cab228d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -189,4 +189,4 @@ private void waitForContainerClose(OzoneOutputStream outputStream)
.waitForContainerClose(outputStream, cluster);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
index 67bdc17..1a24c42 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
@@ -18,4 +18,4 @@
/**
* Integration tests for the command handler's.
*/
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
\ No newline at end of file
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index c09df7a..8befd3a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -180,4 +180,4 @@ public void testContainerMetrics() throws Exception {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
index b41bd15..3e1e102 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
@@ -108,4 +108,4 @@ private long getGauge(String metricName) {
return getLongGauge(metricName,
getMetrics(DatanodeQueueMetrics.METRICS_SOURCE_NAME));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index 95eeadc..ca722cf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -229,4 +229,4 @@ private String testCase() {
}
return "valid token";
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/package-info.java
index cfc6004..ddf9f5b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/package-info.java
@@ -19,4 +19,4 @@
/**
* Test container related classes.
*/
-package org.apache.hadoop.ozone.container;
\ No newline at end of file
+package org.apache.hadoop.ozone.container;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 4b87ed1..8be6d4e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -349,4 +349,4 @@ private static String getToken(ContainerID containerID) throws IOException {
containerTokenSecretManager.createIdentifier(username, containerID)
).encodeToUrlString();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java
index c89f20c..2414c6d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java
@@ -165,7 +165,7 @@ private Map<String, String> getTablePrefixes(
@Test
public void testDAGReconstruction()
- throws IOException, InterruptedException, TimeoutException {
+ throws IOException, InterruptedException, TimeoutException {
// Generate keys
RandomKeyGenerator randomKeyGenerator =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
index 132d086..5e96bf1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
@@ -87,6 +87,7 @@ void testDefault() {
Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
+ randomKeyGenerator.printStats(System.out);
}
@Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
index 8f29735..cd8f7f8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
@@ -146,4 +146,4 @@ public static void shutdown() throws IOException {
cluster.shutdown();
FileUtils.deleteFully(new File(dbPath));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 12bba86..361151b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -152,4 +152,4 @@ private static ContainerData getContainerData(long containerID) {
containerManager.getContainer(containerID).getContainerData();
return containerData;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java
index b798d1a..ea1f4db 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java
@@ -207,4 +207,4 @@ private void checkKeyList(String keyPrefix, String startKey,
System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " +
startKey);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index 403c99f..6c10b7b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -316,4 +316,4 @@ public long checkpointCreationTimeTaken() {
public void cleanupCheckpoint() throws IOException {
FileUtils.deleteDirectory(checkpointFile.toFile());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java
index 214d386..07dffcb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithBucketLayout.java
@@ -158,4 +158,4 @@ private void verifyBucketLayout(OzoneBucket bucket,
Assert.assertEquals(metadataLayout, bucket.getBucketLayout());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
index 50aa6ff..df7b3a7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java
@@ -671,4 +671,4 @@ private static Pipeline createPipeline(DatanodeDetails dn) {
.setNodes(Collections.singletonList(dn))
.build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 6bb4f3f..a5941ff 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -94,6 +95,7 @@ public class TestOmSnapshot {
private static OzoneManagerProtocol writeClient;
private static BucketLayout bucketLayout = BucketLayout.LEGACY;
private static boolean enabledFileSystemPaths;
+ private static boolean forceFullSnapshotDiff;
private static ObjectStore store;
private static File metaDir;
private static OzoneManager leaderOzoneManager;
@@ -108,30 +110,34 @@ public class TestOmSnapshot {
@Parameterized.Parameters
public static Collection<Object[]> data() {
return Arrays.asList(
- new Object[]{OBJECT_STORE, false},
- new Object[]{FILE_SYSTEM_OPTIMIZED, false},
- new Object[]{BucketLayout.LEGACY, true});
+ new Object[]{OBJECT_STORE, false, false},
+ new Object[]{FILE_SYSTEM_OPTIMIZED, false, false},
+ new Object[]{BucketLayout.LEGACY, true, true});
}
public TestOmSnapshot(BucketLayout newBucketLayout,
- boolean newEnableFileSystemPaths) throws Exception {
+ boolean newEnableFileSystemPaths, boolean forceFullSnapDiff)
+ throws Exception {
// Checking whether 'newBucketLayout' and
// 'newEnableFileSystemPaths' flags represents next parameter
// index values. This is to ensure that initialize init() function
// will be invoked only at the beginning of every new set of
// Parameterized.Parameters.
if (TestOmSnapshot.enabledFileSystemPaths != newEnableFileSystemPaths ||
- TestOmSnapshot.bucketLayout != newBucketLayout) {
- setConfig(newBucketLayout, newEnableFileSystemPaths);
+ TestOmSnapshot.bucketLayout != newBucketLayout ||
+ TestOmSnapshot.forceFullSnapshotDiff != forceFullSnapDiff) {
+ setConfig(newBucketLayout, newEnableFileSystemPaths,
+ forceFullSnapDiff);
tearDown();
init();
}
}
private static void setConfig(BucketLayout newBucketLayout,
- boolean newEnableFileSystemPaths) {
+ boolean newEnableFileSystemPaths, boolean forceFullSnapDiff) {
TestOmSnapshot.enabledFileSystemPaths = newEnableFileSystemPaths;
TestOmSnapshot.bucketLayout = newBucketLayout;
+ TestOmSnapshot.forceFullSnapshotDiff = forceFullSnapDiff;
}
/**
@@ -145,6 +151,8 @@ private void init() throws Exception {
enabledFileSystemPaths);
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
bucketLayout.name());
+ conf.setBoolean(OzoneConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF,
+ forceFullSnapshotDiff);
conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST);
cluster = MiniOzoneCluster.newOMHABuilder(conf)
@@ -325,10 +333,10 @@ public void checkKey() throws Exception {
}
}
OmKeyArgs keyArgs = genKeyArgs(snapshotKeyPrefix + key1);
-
+
OmKeyInfo omKeyInfo = writeClient.lookupKey(keyArgs);
assertEquals(omKeyInfo.getKeyName(), snapshotKeyPrefix + key1);
-
+
OmKeyInfo fileInfo = writeClient.lookupFile(keyArgs);
assertEquals(fileInfo.getKeyName(), snapshotKeyPrefix + key1);
@@ -615,6 +623,33 @@ public void testSnapDiffMissingMandatoryParams() throws Exception {
() -> store.snapshotDiff(nullstr, bucket, snap1, snap2));
}
+ @Test
+ public void testSnapDiffMultipleBuckets() throws Exception {
+ String volume = "vol-" + RandomStringUtils.randomNumeric(5);
+ String bucketName1 = "buck-" + RandomStringUtils.randomNumeric(5);
+ String bucketName2 = "buck-" + RandomStringUtils.randomNumeric(5);
+ store.createVolume(volume);
+ OzoneVolume volume1 = store.getVolume(volume);
+ volume1.createBucket(bucketName1);
+ volume1.createBucket(bucketName2);
+ OzoneBucket bucket1 = volume1.getBucket(bucketName1);
+ OzoneBucket bucket2 = volume1.getBucket(bucketName2);
+ // Create Key1 and take snapshot
+ String key1 = "key-1-";
+ key1 = createFileKey(bucket1, key1);
+ String snap1 = "snap" + RandomStringUtils.randomNumeric(5);
+ createSnapshot(volume, bucketName1, snap1);
+ // Create key in bucket2 and bucket1 and calculate diff
+ // Diff should not contain bucket2's key
+ createFileKey(bucket1, key1);
+ createFileKey(bucket2, key1);
+ String snap2 = "snap" + RandomStringUtils.randomNumeric(5);
+ createSnapshot(volume, bucketName1, snap2);
+ SnapshotDiffReport diff1 =
+ store.snapshotDiff(volume, bucketName1, snap1, snap2);
+ Assert.assertEquals(1, diff1.getDiffList().size());
+ }
+
/**
* Tests snapdiff when there are multiple sst files in the from & to
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java
index 5ad6770..cf5220ce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java
@@ -19,4 +19,4 @@
/**
* Ozone Manager Tests.
*/
-package org.apache.hadoop.ozone.om;
\ No newline at end of file
+package org.apache.hadoop.ozone.om;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
index 84eb8dd..ebab74d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java
@@ -19,4 +19,4 @@
/**
* Test utils for Ozone.
*/
-package org.apache.hadoop.ozone;
\ No newline at end of file
+package org.apache.hadoop.ozone;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
index d42f29b..ca4bb9e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
@@ -173,8 +173,8 @@ private void invokeXceiverClientReadChunk(XceiverClientSpi client)
.setBytesPerChecksum(512)
.setType(ContainerProtos.ChecksumType.CRC32)
.build())
- .setLen(100)
- .setOffset(100)
+ .setLen(-1)
+ .setOffset(0)
.build(),
bid,
null, null);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
index 7ac6d18..1e03823 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
@@ -21,4 +21,4 @@
/**
* Unit tests for Node related functions in SCM.
*/
-package org.apache.hadoop.ozone.scm.node;
\ No newline at end of file
+package org.apache.hadoop.ozone.scm.node;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java
index ea6734a..a71f981 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java
@@ -21,4 +21,4 @@
/**
* Unit tests for Pipeline related functions in SCM.
*/
-package org.apache.hadoop.ozone.scm.pipeline;
\ No newline at end of file
+package org.apache.hadoop.ozone.scm.pipeline;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
new file mode 100644
index 0000000..e74041c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.shell;
+
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
+import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.admin.scm.GetFailedDeletedBlocksTxnSubcommand;
+import org.apache.hadoop.ozone.admin.scm.ResetDeletedBlockRetryCountSubcommand;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
+
+/**
+ * Test for DeletedBlocksTxnSubcommand Cli.
+ */
+public class TestDeletedBlocksTxnShell {
+
+ private static final Logger LOG = LoggerFactory
+ .getLogger(TestDeletedBlocksTxnShell.class);
+
+ private final PrintStream originalOut = System.out;
+ private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+ private MiniOzoneHAClusterImpl cluster = null;
+ private OzoneConfiguration conf;
+ private String clusterId;
+ private String scmId;
+ private String scmServiceId;
+ private File txnFile;
+ private int numOfSCMs = 3;
+
+ private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name();
+
+ @TempDir
+ private Path tempDir;
+
+ /**
+ * Create a MiniOzoneHACluster for testing.
+ *
+ * @throws IOException
+ */
+ @BeforeEach
+ public void init() throws Exception {
+ conf = new OzoneConfiguration();
+ clusterId = UUID.randomUUID().toString();
+ scmId = UUID.randomUUID().toString();
+ scmServiceId = "scm-service-test1";
+
+ conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+ conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
+
+ cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
+ .setClusterId(clusterId)
+ .setScmId(scmId)
+ .setSCMServiceId(scmServiceId)
+ .setNumOfStorageContainerManagers(numOfSCMs)
+ .setNumOfActiveSCMs(numOfSCMs)
+ .setNumOfOzoneManagers(1)
+ .build();
+ cluster.waitForClusterToBeReady();
+
+ txnFile = tempDir.resolve("txn.txt").toFile();
+ LOG.info("txnFile path: {}", txnFile.getAbsolutePath());
+ System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
+ }
+
+ /**
+   * Shutdown the MiniOzoneHACluster.
+ */
+ @AfterEach
+ public void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ System.setOut(originalOut);
+ }
+
+ //<containerID, List<blockID>>
+ private Map<Long, List<Long>> generateData(int dataSize) throws Exception {
+ Map<Long, List<Long>> blockMap = new HashMap<>();
+ Random random = new Random(1);
+    int containerIDBase = random.nextInt(100);
+    int localIDBase = random.nextInt(1000);
+    for (int i = 0; i < dataSize; i++) {
+      long containerID = containerIDBase + i;
+ updateContainerMetadata(containerID);
+ List<Long> blocks = new ArrayList<>();
+ for (int j = 0; j < 5; j++) {
+ long localID = localIDBase + j;
+ blocks.add(localID);
+ }
+ blockMap.put(containerID, blocks);
+ }
+ return blockMap;
+ }
+
+ private void updateContainerMetadata(long cid) throws Exception {
+ final ContainerInfo container =
+ new ContainerInfo.Builder()
+ .setContainerID(cid)
+ .setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
+ .setState(HddsProtos.LifeCycleState.CLOSED)
+ .setOwner("TestDeletedBlockLog")
+ .setPipelineID(PipelineID.randomId())
+ .build();
+ final Set<ContainerReplica> replicaSet = cluster.getHddsDatanodes()
+ .subList(0, 3)
+ .stream()
+ .map(dn -> ContainerReplica.newBuilder()
+ .setContainerID(container.containerID())
+ .setContainerState(State.CLOSED)
+ .setDatanodeDetails(dn.getDatanodeDetails())
+ .build())
+ .collect(Collectors.toSet());
+ ContainerStateManager containerStateManager = getSCMLeader().
+ getContainerManager().getContainerStateManager();
+ containerStateManager.addContainer(container.getProtobuf());
+ for (ContainerReplica replica: replicaSet) {
+ containerStateManager.updateContainerReplica(
+ ContainerID.valueOf(cid), replica);
+ }
+ }
+
+ private StorageContainerManager getSCMLeader() {
+ return cluster.getStorageContainerManagersList()
+ .stream().filter(a -> a.getScmContext().isLeaderReady())
+ .collect(Collectors.toList()).get(0);
+ }
+
+ private void flush() throws Exception {
+    // only flush the leader here, to avoid concurrent flush and write from the followers
+ getSCMLeader().getScmHAManager().asSCMHADBTransactionBuffer().flush();
+ }
+
+ @Test
+ public void testDeletedBlocksTxnSubcommand() throws Exception {
+ int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
+ int currentValidTxnNum;
+ // add 30 block deletion transactions
+ DeletedBlockLog deletedBlockLog = getSCMLeader().
+ getScmBlockManager().getDeletedBlockLog();
+ deletedBlockLog.addTransactions(generateData(30));
+ flush();
+ currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions();
+ LOG.info("Valid num of txns: {}", currentValidTxnNum);
+ Assertions.assertEquals(30, currentValidTxnNum);
+
+ // let the first 20 txns be failed
+ List<Long> txIds = new ArrayList<>();
+ for (int i = 1; i < 21; i++) {
+ txIds.add((long) i);
+ }
+    // increment retry count past the threshold; count will be set to -1
+ for (int i = 0; i < maxRetry + 1; i++) {
+ deletedBlockLog.incrementCount(txIds);
+ }
+ flush();
+ currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions();
+ LOG.info("Valid num of txns: {}", currentValidTxnNum);
+ Assertions.assertEquals(10, currentValidTxnNum);
+
+ ContainerOperationClient scmClient = new ContainerOperationClient(conf);
+ CommandLine cmd;
+ // getFailedDeletedBlocksTxn cmd will print all the failed txns
+ GetFailedDeletedBlocksTxnSubcommand getCommand =
+ new GetFailedDeletedBlocksTxnSubcommand();
+ cmd = new CommandLine(getCommand);
+ cmd.parseArgs("-a");
+ getCommand.execute(scmClient);
+ int matchCount = 0;
+ Pattern p = Pattern.compile("\"txID\" : \\d+", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ while (m.find()) {
+ matchCount += 1;
+ }
+ Assertions.assertEquals(20, matchCount);
+
+ // print the first 10 failed txns info into file
+ cmd.parseArgs("-o", txnFile.getAbsolutePath(), "-c", "10");
+ getCommand.execute(scmClient);
+ Assertions.assertTrue(txnFile.exists());
+
+ ResetDeletedBlockRetryCountSubcommand resetCommand =
+ new ResetDeletedBlockRetryCountSubcommand();
+ cmd = new CommandLine(resetCommand);
+
+ // reset the txns in file
+ cmd.parseArgs("-i", txnFile.getAbsolutePath());
+ resetCommand.execute(scmClient);
+ flush();
+ currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions();
+ LOG.info("Valid num of txns: {}", currentValidTxnNum);
+ Assertions.assertEquals(20, currentValidTxnNum);
+
+ // reset the given txIds list
+ cmd.parseArgs("-l", "11,12,13,14,15");
+ resetCommand.execute(scmClient);
+ flush();
+ currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions();
+ LOG.info("Valid num of txns: {}", currentValidTxnNum);
+ Assertions.assertEquals(25, currentValidTxnNum);
+
+ // reset the non-existing txns and valid txns, should do nothing
+ cmd.parseArgs("-l", "1,2,3,4,5,100,101,102,103,104,105");
+ resetCommand.execute(scmClient);
+ flush();
+ currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions();
+ LOG.info("Valid num of txns: {}", currentValidTxnNum);
+ Assertions.assertEquals(25, currentValidTxnNum);
+
+    // reset all the retry-expired txIds; all transactions should be available
+ cmd.parseArgs("-a");
+ resetCommand.execute(scmClient);
+ flush();
+ currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions();
+ LOG.info("Valid num of txns: {}", currentValidTxnNum);
+ Assertions.assertEquals(30, currentValidTxnNum);
+ }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestResetDeletedBlockRetryCountShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestResetDeletedBlockRetryCountShell.java
deleted file mode 100644
index 8e90864..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestResetDeletedBlockRetryCountShell.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.shell;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
-import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.admin.scm.ResetDeletedBlockRetryCountSubcommand;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-
-/**
- * Test for ResetDeletedBlockRetryCountSubcommand Cli.
- */
-public class TestResetDeletedBlockRetryCountShell {
-
- private static final Logger LOG = LoggerFactory
- .getLogger(TestResetDeletedBlockRetryCountShell.class);
- private MiniOzoneHAClusterImpl cluster = null;
- private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
- private String scmServiceId;
- private int numOfSCMs = 3;
-
- /**
- * Create a MiniOzoneHACluster for testing.
- *
- * @throws IOException
- */
- @BeforeEach
- public void init() throws Exception {
- conf = new OzoneConfiguration();
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
- scmServiceId = "scm-service-test1";
-
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
-
- cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
- .setSCMServiceId(scmServiceId)
- .setNumOfStorageContainerManagers(numOfSCMs)
- .setNumOfActiveSCMs(numOfSCMs)
- .setNumOfOzoneManagers(1)
- .build();
- cluster.waitForClusterToBeReady();
- }
-
- /**
- * Shutdown MiniDFSCluster.
- */
- @AfterEach
- public void shutdown() {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
-
- //<containerID, List<blockID>>
- private Map<Long, List<Long>> generateData(int dataSize) {
- Map<Long, List<Long>> blockMap = new HashMap<>();
- Random random = new Random(1);
- int continerIDBase = random.nextInt(100);
- int localIDBase = random.nextInt(1000);
- for (int i = 0; i < dataSize; i++) {
- long containerID = continerIDBase + i;
- List<Long> blocks = new ArrayList<>();
- for (int j = 0; j < 5; j++) {
- long localID = localIDBase + j;
- blocks.add(localID);
- }
- blockMap.put(containerID, blocks);
- }
- return blockMap;
- }
-
- private StorageContainerManager getSCMLeader() {
- return cluster.getStorageContainerManagersList()
- .stream().filter(a -> a.getScmContext().isLeaderReady())
- .collect(Collectors.toList()).get(0);
- }
-
- @Test
- public void testResetCmd() throws IOException, TimeoutException {
- int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
- // add some block deletion transactions
- DeletedBlockLog deletedBlockLog = getSCMLeader().
- getScmBlockManager().getDeletedBlockLog();
- deletedBlockLog.addTransactions(generateData(30));
- getSCMLeader().getScmHAManager().asSCMHADBTransactionBuffer().flush();
- LOG.info("Valid num of txns: {}", deletedBlockLog.
- getNumOfValidTransactions());
- Assertions.assertEquals(30, deletedBlockLog.getNumOfValidTransactions());
-
- List<Long> txIds = new ArrayList<>();
- for (int i = 1; i < 31; i++) {
- txIds.add((long) i);
- }
- // increment retry count than threshold, count will be set to -1
- for (int i = 0; i < maxRetry + 1; i++) {
- deletedBlockLog.incrementCount(txIds);
- }
- for (StorageContainerManager scm:
- cluster.getStorageContainerManagersList()) {
- scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
- }
- LOG.info("Valid num of txns: {}", deletedBlockLog.
- getNumOfValidTransactions());
- Assertions.assertEquals(0, deletedBlockLog.getNumOfValidTransactions());
-
- ResetDeletedBlockRetryCountSubcommand subcommand =
- new ResetDeletedBlockRetryCountSubcommand();
- ContainerOperationClient scmClient = new ContainerOperationClient(conf);
- CommandLine c = new CommandLine(subcommand);
- // reset the given txIds list, only these transactions should be available
- c.parseArgs("-l", "1,2,3,4,5");
- subcommand.execute(scmClient);
- getSCMLeader().getScmHAManager().asSCMHADBTransactionBuffer().flush();
- LOG.info("Valid num of txns: {}", deletedBlockLog.
- getNumOfValidTransactions());
- Assertions.assertEquals(5, deletedBlockLog.getNumOfValidTransactions());
-
- // reset all the result expired txIds, all transactions should be available
- c.parseArgs("-a");
- subcommand.execute(scmClient);
- getSCMLeader().getScmHAManager().asSCMHADBTransactionBuffer().flush();
- LOG.info("Valid num of txns: {}", deletedBlockLog.
- getNumOfValidTransactions());
- Assertions.assertEquals(30, deletedBlockLog.getNumOfValidTransactions());
- }
-}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index a2b2994..33ee575 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -1028,6 +1028,10 @@
repeated KeyInfo keyInfo = 1;
}
+message KeyRenameInfo {
+ repeated string keyNames = 1;
+}
+
message OzoneFileStatusProto {
optional KeyInfo keyInfo = 2;
optional uint64 blockSize = 3;
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 0229e86..285fb02 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
@@ -374,6 +375,8 @@ String getMultipartKey(String volume, String bucket, String key, String
Table<String, SnapshotInfo> getSnapshotInfoTable();
+ Table<String, OmKeyRenameInfo> getRenamedKeyTable();
+
/**
* Gets the OM Meta table.
* @return meta table reference.
@@ -482,6 +485,18 @@ default String getOzonePathKey(long volumeId, long bucketId,
String getOpenFileName(long volumeId, long bucketId,
long parentObjectId, String fileName, long id);
+
+ /**
+   * Given a volume, bucket and an objectID, return the DB key name in
+ * renamedKeyTable.
+ *
+ * @param volume - volume name
+ * @param bucket - bucket name
+ * @param objectID - objectID of the key
+ * @return DB rename key as String.
+ */
+ String getRenameKey(String volume, String bucket, long objectID);
+
/**
* Returns the DB key name of a multipart upload key in OM metadata store.
*
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyRenameInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyRenameInfoCodec.java
new file mode 100644
index 0000000..dbd3b6f
--- /dev/null
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyRenameInfoCodec.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyRenameInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode OmKeyRenameInfo as byte array.
+ */
+public class OmKeyRenameInfoCodec implements Codec<OmKeyRenameInfo> {
+ @Override
+ public byte[] toPersistedFormat(OmKeyRenameInfo object) throws IOException {
+ Preconditions
+ .checkNotNull(object, "Null object can't be converted to byte array.");
+ return object.getProto().toByteArray();
+ }
+
+ @Override
+ public OmKeyRenameInfo fromPersistedFormat(byte[] rawData)
+ throws IOException {
+ Preconditions.checkNotNull(rawData,
+        "Null byte array can't be converted to real object.");
+ try {
+ return OmKeyRenameInfo.getFromProto(KeyRenameInfo.parseFrom(rawData));
+ } catch (InvalidProtocolBufferException ex) {
+ throw new IllegalArgumentException(
+          "Can't decode the raw data from the byte array", ex);
+ }
+ }
+
+ @Override
+ public OmKeyRenameInfo copyObject(OmKeyRenameInfo object) {
+ return object.copyObject();
+ }
+}
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
index 4101cf3..654c8d0 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
@@ -21,4 +21,4 @@
/**
* Helpers for OM storage proto layer.
*/
-package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.helpers;
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/package-info.java
index df49463..30b6fed 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/package-info.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -21,4 +21,4 @@
/**
* OMMetadataManager.
*/
-package org.apache.hadoop.ozone.om;
\ No newline at end of file
+package org.apache.hadoop.ozone.om;
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
index 4b28d47..3a008f9 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
@@ -21,4 +21,4 @@
/**
* OMTransactionInfo.
*/
-package org.apache.hadoop.ozone.om.ratis;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.ratis;
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java
index 346742a..d88dcdf 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java
@@ -96,4 +96,4 @@ public void testToAndFromPersistedFormat() throws IOException {
assertTrue("Load saved prefix info should match",
opiLoad.equals(opiSave));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java
index 8b5690a..3de2847 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java
@@ -21,4 +21,4 @@
/**
* Unit tests for codec's in OM.
*/
-package org.apache.hadoop.ozone.om.codec;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.codec;
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java
index bda29d2..e6d1bb1 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java
@@ -21,4 +21,4 @@
/**
* Unit tests for helpers in OM.
*/
-package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.helpers;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index d194360..faeaae6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -1309,6 +1309,9 @@ public void incEcBucketCreateFailsTotal() {
}
public void unRegister() {
+ if (dbCheckpointMetrics != null) {
+ dbCheckpointMetrics.unRegister();
+ }
MetricsSystem ms = DefaultMetricsSystem.instance();
ms.unregisterSource(SOURCE_NAME);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
index cc12ce3..ed5a93e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java
@@ -34,4 +34,4 @@ void bootstrap(OzoneConfiguration conf, boolean force) throws IOException,
AuthenticationException;
void startAndCancelPrepare(OzoneConfiguration conf) throws IOException,
AuthenticationException;
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
index aee1e00..b5a2ba4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
@@ -264,4 +264,4 @@ protected Properties getNodeProperties() {
public static File getOmDbDir(ConfigurationSource conf) {
return ServerUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 2a0241e..ede9b58 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -64,6 +64,7 @@
import org.apache.hadoop.ozone.om.codec.OmDBTenantStateCodec;
import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmKeyRenameInfoCodec;
import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
import org.apache.hadoop.ozone.om.codec.OmDBSnapshotInfoCodec;
import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
@@ -83,6 +84,7 @@
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -193,6 +195,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
* |----------------------------------------------------------------------|
* | snapshotInfoTable | /volume/bucket/snapshotName -> SnapshotInfo |
* |----------------------------------------------------------------------|
+ * | renamedKeyTable | /volumeName/bucketName/objectID -> OmKeyRenameInfo|
+ * |----------------------------------------------------------------------|
*/
public static final String USER_TABLE = "userTable";
@@ -219,6 +223,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
"principalToAccessIdsTable";
public static final String TENANT_STATE_TABLE = "tenantStateTable";
public static final String SNAPSHOT_INFO_TABLE = "snapshotInfoTable";
+ public static final String RENAMED_KEY_TABLE = "renamedKeyTable";
static final String[] ALL_TABLES = new String[] {
USER_TABLE,
@@ -240,7 +245,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
TENANT_ACCESS_ID_TABLE,
PRINCIPAL_TO_ACCESS_IDS_TABLE,
TENANT_STATE_TABLE,
- SNAPSHOT_INFO_TABLE
+ SNAPSHOT_INFO_TABLE,
+ RENAMED_KEY_TABLE
};
private DBStore store;
@@ -267,7 +273,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
private Table tenantAccessIdTable;
private Table principalToAccessIdsTable;
private Table tenantStateTable;
+
private Table snapshotInfoTable;
+ private Table renamedKeyTable;
private boolean isRatisEnabled;
private boolean ignorePipelineinKey;
@@ -502,6 +510,7 @@ public static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
.addTable(PRINCIPAL_TO_ACCESS_IDS_TABLE)
.addTable(TENANT_STATE_TABLE)
.addTable(SNAPSHOT_INFO_TABLE)
+ .addTable(RENAMED_KEY_TABLE)
.addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
.addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
.addCodec(RepeatedOmKeyInfo.class,
@@ -516,10 +525,9 @@ public static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
.addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec())
.addCodec(OmDBTenantState.class, new OmDBTenantStateCodec())
.addCodec(OmDBAccessIdInfo.class, new OmDBAccessIdInfoCodec())
- .addCodec(OmDBUserPrincipalInfo.class,
- new OmDBUserPrincipalInfoCodec())
- .addCodec(SnapshotInfo.class,
- new OmDBSnapshotInfoCodec());
+ .addCodec(OmDBUserPrincipalInfo.class, new OmDBUserPrincipalInfoCodec())
+ .addCodec(SnapshotInfo.class, new OmDBSnapshotInfoCodec())
+ .addCodec(OmKeyRenameInfo.class, new OmKeyRenameInfoCodec());
}
/**
@@ -622,6 +630,11 @@ protected void initializeOmTables(boolean addCacheMetrics)
String.class, SnapshotInfo.class);
checkTableStatus(snapshotInfoTable, SNAPSHOT_INFO_TABLE, addCacheMetrics);
+ // objectID -> renamedKeys (renamed keys for key table)
+ renamedKeyTable = this.store.getTable(RENAMED_KEY_TABLE,
+ String.class, OmKeyRenameInfo.class);
+ checkTableStatus(renamedKeyTable, RENAMED_KEY_TABLE, addCacheMetrics);
+ // TODO: [SNAPSHOT] Initialize table lock for renamedKeyTable.
}
/**
@@ -1614,6 +1627,11 @@ public Table<String, SnapshotInfo> getSnapshotInfoTable() {
return snapshotInfoTable;
}
+ @Override
+ public Table<String, OmKeyRenameInfo> getRenamedKeyTable() {
+ return renamedKeyTable;
+ }
+
/**
* Update store used by subclass.
*
@@ -1677,6 +1695,16 @@ public String getOpenFileName(long volumeId, long bucketId,
}
@Override
+ public String getRenameKey(String volumeName, String bucketName,
+ long objectID) {
+ StringBuilder renameKey = new StringBuilder();
+ renameKey.append(OM_KEY_PREFIX).append(volumeName);
+ renameKey.append(OM_KEY_PREFIX).append(bucketName);
+ renameKey.append(OM_KEY_PREFIX).append(objectID);
+ return renameKey.toString();
+ }
+
+ @Override
public String getMultipartKey(long volumeId, long bucketId,
long parentID, String fileName,
String uploadId) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 6dfb2d3..1c1c683 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -78,7 +78,8 @@ public final class OmSnapshotManager implements AutoCloseable {
this.snapshotDiffDb =
createDbForSnapshotDiff(ozoneManager.getConfiguration());
- this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ);
+ this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ,
+ ozoneManager.getConfiguration());
// size of lru cache
int cacheSize = ozoneManager.getConfiguration().getInt(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 42d657a..21e5331 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -235,4 +235,4 @@ public void startAndCancelPrepare(OzoneConfiguration conf)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index aa40cc8..ae88859 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -229,6 +230,15 @@ String.class, new StringCodec(), OmKeyInfo.class,
SnapshotInfo.class,
new OmDBSnapshotInfoCodec());
+ public static final DBColumnFamilyDefinition<String, OmKeyRenameInfo>
+ RENAMED_KEY_TABLE =
+ new DBColumnFamilyDefinition<>(
+ OmMetadataManagerImpl.RENAMED_KEY_TABLE,
+ String.class, // /volumeName/bucketName/objectID
+ new StringCodec(),
+ OmKeyRenameInfo.class, // list of key renames
+ new OmKeyRenameInfoCodec());
+
@Override
public String getName() {
return OzoneConsts.OM_DB_NAME;
@@ -248,7 +258,7 @@ public String getLocationConfigKey() {
FILE_TABLE, OPEN_FILE_TABLE, DELETED_DIR_TABLE, META_TABLE,
TENANT_ACCESS_ID_TABLE,
PRINCIPAL_TO_ACCESS_IDS_TABLE, TENANT_STATE_TABLE,
- SNAPSHOT_INFO_TABLE};
+ SNAPSHOT_INFO_TABLE, RENAMED_KEY_TABLE};
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java
index 030d484..6e6c6cc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java
@@ -24,4 +24,4 @@
/**
* OM DB definitions.
*/
-package org.apache.hadoop.ozone.om.codec;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.codec;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java
index 3255185..f0ec577 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.om.fs;
/*
This package contains the Ozone Manager FileSystem interface classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
index 7904d5d..868a704 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -18,4 +18,4 @@
package org.apache.hadoop.ozone.om;
/*
This package contains the Ozone Manager classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index 6c8048a..94520e1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -649,4 +649,4 @@ private synchronized void swapCurrentAndReadyBuffer() {
public OzoneManagerDoubleBufferMetrics getOzoneManagerDoubleBufferMetrics() {
return ozoneManagerDoubleBufferMetrics;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
index ea25f13..9161a92 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains classes for the OM Ratis server implementation.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 61e0a86..9b3347b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -489,4 +489,4 @@ private static OMRequest changeBucketLayout(OMRequest originalRequest,
return originalRequest.toBuilder()
.setCreateBucketRequest(newCreateRequest).build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
index f0ca3b4..575d943 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to bucket requests.
*/
-package org.apache.hadoop.ozone.om.request.bucket;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.bucket;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java
index 3184500..4c4a3c7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to file requests.
*/
-package org.apache.hadoop.ozone.om.request.file;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.file;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
index f28cade..8bc98ec 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
@@ -233,4 +233,4 @@ private OMClientResponse getOmClientResponse(long clientID,
return new OMAllocateBlockResponseWithFSO(omResponse.build(), openKeyInfo,
clientID, getBucketLayout(), volumeId, omBucketInfo.getObjectID());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java
index 0a027cc..cc0b332 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java
@@ -19,4 +19,4 @@
/**
* Package contains classes related to acl requests for prefix.
*/
-package org.apache.hadoop.ozone.om.request.key.acl.prefix;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.key.acl.prefix;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java
index af20fe1..373124f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to key requests.
*/
-package org.apache.hadoop.ozone.om.request.key;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.key;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java
index ee324cf..7c99ffb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java
@@ -18,4 +18,4 @@
/**
* This package contains classes for handling OMRequests.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java
index c7608e8..0a41793 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains classes which handle security requests.
*/
-package org.apache.hadoop.ozone.om.request.security;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.security;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
index 60e3e2c..d8cbdad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
@@ -187,4 +187,4 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
return omClientResponse;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java
index 833c7c8..bda55f4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java
@@ -224,4 +224,4 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
return omClientResponse;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java
index f1edc13..452a7d8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to snapshot requests.
*/
-package org.apache.hadoop.ozone.om.request.snapshot;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.snapshot;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/package-info.java
index e82bab7..8f5ac13 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/package-info.java
@@ -59,4 +59,4 @@
* In general, it is a good practice to have the request handling code, and the
* validations tied together in one class.
*/
-package org.apache.hadoop.ozone.om.request.validation;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.validation;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
index 79c4afd..cf97c59 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
@@ -19,4 +19,4 @@
/**
* Package contains classes related to volume acl requests and responses.
*/
-package org.apache.hadoop.ozone.om.request.volume.acl;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.volume.acl;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
index 708f708..588ad7e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
@@ -19,4 +19,4 @@
/**
* Package contains classes related to volume requests.
*/
-package org.apache.hadoop.ozone.om.request.volume;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.volume;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java
index dd26272..9f90a4a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains classes for handling bucket acl responses.
*/
-package org.apache.hadoop.ozone.om.response.bucket.acl;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.bucket.acl;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
index e70c1c3..c8319c9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to bucket responses.
*/
-package org.apache.hadoop.ozone.om.response.bucket;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.bucket;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java
index 135eca9..6c23f1a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to file responses.
*/
-package org.apache.hadoop.ozone.om.response.file;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.file;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index b5cc494..e755e75 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
@@ -30,11 +31,12 @@
import javax.annotation.Nonnull;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.RENAMED_KEY_TABLE;
/**
* Response for RenameKey request.
*/
-@CleanupTableInfo(cleanupTables = {KEY_TABLE})
+@CleanupTableInfo(cleanupTables = {KEY_TABLE, RENAMED_KEY_TABLE})
public class OMKeyRenameResponse extends OmKeyResponse {
private String fromKeyName;
@@ -73,13 +75,26 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
String volumeName = renameKeyInfo.getVolumeName();
String bucketName = renameKeyInfo.getBucketName();
+ String fromDbKey = omMetadataManager
+ .getOzoneKey(volumeName, bucketName, fromKeyName);
omMetadataManager.getKeyTable(getBucketLayout())
- .deleteWithBatch(batchOperation,
- omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName));
+ .deleteWithBatch(batchOperation, fromDbKey);
omMetadataManager.getKeyTable(getBucketLayout())
.putWithBatch(batchOperation,
omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
renameKeyInfo);
+
+ String renameDbKey = omMetadataManager.getRenameKey(
+ renameKeyInfo.getVolumeName(), renameKeyInfo.getBucketName(),
+ renameKeyInfo.getObjectID());
+ OmKeyRenameInfo omKeyRenameInfo = omMetadataManager.getRenamedKeyTable()
+ .get(renameDbKey);
+ if (omKeyRenameInfo == null) {
+ omKeyRenameInfo = new OmKeyRenameInfo(fromDbKey);
+ omMetadataManager.getRenamedKeyTable().putWithBatch(
+ batchOperation, renameDbKey, omKeyRenameInfo);
+ }
+
}
public OmKeyInfo getRenameKeyInfo() {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
index 4b5f6d1..eb8f4a2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -33,11 +34,13 @@
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.RENAMED_KEY_TABLE;
/**
* Response for RenameKey request - prefix layout.
*/
-@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE})
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE,
+ RENAMED_KEY_TABLE})
public class OMKeyRenameResponseWithFSO extends OMKeyRenameResponse {
private boolean isRenameDirectory;
@@ -88,7 +91,21 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
.deleteWithBatch(batchOperation, getFromKeyName());
omMetadataManager.getKeyTable(getBucketLayout())
.putWithBatch(batchOperation, getToKeyName(), getRenameKeyInfo());
+
+ String renameDbKey = omMetadataManager.getRenameKey(
+ getRenameKeyInfo().getVolumeName(),
+ getRenameKeyInfo().getBucketName(),
+ getRenameKeyInfo().getObjectID());
+
+ OmKeyRenameInfo omKeyRenameInfo = omMetadataManager.getRenamedKeyTable()
+ .get(renameDbKey);
+ if (omKeyRenameInfo == null) {
+ omKeyRenameInfo = new OmKeyRenameInfo(getFromKeyName());
+ omMetadataManager.getRenamedKeyTable().putWithBatch(
+ batchOperation, renameDbKey, omKeyRenameInfo);
+ }
}
+
if (fromKeyParent != null) {
addDirToDBBatch(omMetadataManager, fromKeyParent,
volumeId, bucketId, batchOperation);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
index edba72c..cf09247 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
@@ -104,4 +104,4 @@ public List<OmKeyInfo> getOmKeyInfoList() {
public OmBucketInfo getOmBucketInfo() {
return omBucketInfo;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java
index b09ff9e..dd69092 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysRenameResponse.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmRenameKeys;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -31,13 +32,14 @@
import java.util.Map;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.RENAMED_KEY_TABLE;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME;
/**
* Response for RenameKeys request.
*/
-@CleanupTableInfo(cleanupTables = {KEY_TABLE})
+@CleanupTableInfo(cleanupTables = {KEY_TABLE, RENAMED_KEY_TABLE})
public class OMKeysRenameResponse extends OMClientResponse {
private OmRenameKeys omRenameKeys;
@@ -77,14 +79,28 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
String fromKeyName = entry.getKey();
OmKeyInfo newKeyInfo = entry.getValue();
String toKeyName = newKeyInfo.getKeyName();
+ String fromDbKey = omMetadataManager
+ .getOzoneKey(volumeName, bucketName, fromKeyName);
omMetadataManager.getKeyTable(getBucketLayout())
- .deleteWithBatch(batchOperation, omMetadataManager
- .getOzoneKey(volumeName, bucketName, fromKeyName));
+ .deleteWithBatch(batchOperation, fromDbKey);
omMetadataManager.getKeyTable(getBucketLayout())
.putWithBatch(batchOperation,
omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName),
newKeyInfo);
+
+
+ String renameDbKey = omMetadataManager.getRenameKey(
+ newKeyInfo.getVolumeName(), newKeyInfo.getBucketName(),
+ newKeyInfo.getObjectID());
+
+ OmKeyRenameInfo omKeyRenameInfo = omMetadataManager.getRenamedKeyTable()
+ .get(renameDbKey);
+ if (omKeyRenameInfo == null) {
+ omKeyRenameInfo = new OmKeyRenameInfo(fromDbKey);
+ omMetadataManager.getRenamedKeyTable().putWithBatch(
+ batchOperation, renameDbKey, omKeyRenameInfo);
+ }
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java
index 2097d22..dbca459 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains classes related to key responses.
*/
-package org.apache.hadoop.ozone.om.response.key;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.key;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java
index d66cac7..53a9d62 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java
@@ -21,4 +21,4 @@
/**
* This package contains classes for the OM Responses.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java
index 014bc42..4e68df6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains classes which handle security request responses.
*/
-package org.apache.hadoop.ozone.om.response.security;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.security;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
index e5ba1c0..718b67e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
@@ -19,8 +19,11 @@
package org.apache.hadoop.ozone.om.response.snapshot;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
@@ -28,12 +31,15 @@
import javax.annotation.Nonnull;
import java.io.IOException;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.RENAMED_KEY_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE;
/**
* Response for OMSnapshotCreateRequest.
*/
-@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE})
+@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE, RENAMED_KEY_TABLE})
public class OMSnapshotCreateResponse extends OMClientResponse {
public OMSnapshotCreateResponse(@Nonnull OMResponse omResponse,
@@ -68,5 +74,25 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
// Add to db
omMetadataManager.getSnapshotInfoTable().putWithBatch(batchOperation,
key, snapshotInfo);
+
+ // TODO: [SNAPSHOT] Move to createOmSnapshotCheckpoint and add table lock
+ // Remove all entries from renamedKeyTable
+ TableIterator<String, ? extends Table.KeyValue<String, OmKeyRenameInfo>>
+ iterator = omMetadataManager.getRenamedKeyTable().iterator();
+
+ String dbSnapshotBucketKey = omMetadataManager.getBucketKey(
+ snapshotInfo.getVolumeName(), snapshotInfo.getBucketName())
+ + OM_KEY_PREFIX;
+ iterator.seek(dbSnapshotBucketKey);
+
+ while (iterator.hasNext()) {
+ String renameDbKey = iterator.next().getKey();
+ if (!renameDbKey.startsWith(dbSnapshotBucketKey)) {
+ break;
+ }
+ omMetadataManager.getRenamedKeyTable()
+ .deleteWithBatch(batchOperation, renameDbKey);
+ }
+
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
index 478a19d..b265997 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java
@@ -19,4 +19,4 @@
/**
* Package contains classes related to volume requests.
*/
-package org.apache.hadoop.ozone.om.response.volume;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.volume;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 06c601a..58006e1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -22,7 +22,9 @@
import java.util.UUID;
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.IntegerCodec;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -42,6 +44,7 @@
import org.apache.ozone.rocksdb.util.RdbUtil;
import org.apache.ozone.rocksdiff.DifferSnapshotInfo;
import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.apache.ozone.rocksdiff.RocksDiffUtils;
import org.jetbrains.annotations.NotNull;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
@@ -72,10 +75,15 @@ public class SnapshotDiffManager {
private final ManagedRocksDB db;
private final CodecRegistry codecRegistry;
+ private OzoneConfiguration configuration;
+
+
public SnapshotDiffManager(ManagedRocksDB db,
- RocksDBCheckpointDiffer differ) {
+ RocksDBCheckpointDiffer differ,
+ OzoneConfiguration conf) {
this.db = db;
this.differ = differ;
+ this.configuration = conf;
this.codecRegistry = new CodecRegistry();
// Integers are used for indexing persistent list.
@@ -123,6 +131,7 @@ private DifferSnapshotInfo getDSIFromSI(SnapshotInfo snapshotInfo,
getTablePrefixes(snapshotOMMM, volumeName, bucketName));
}
+ @SuppressWarnings("checkstyle:methodlength")
public SnapshotDiffReport getSnapshotDiffReport(final String volume,
final String bucket,
final OmSnapshot fromSnapshot,
@@ -197,14 +206,22 @@ public SnapshotDiffReport getSnapshotDiffReport(final String volume,
codecRegistry,
DiffReportEntry.class);
- final Table<String, OmKeyInfo> fsKeyTable = fromSnapshot
- .getMetadataManager().getKeyTable(bucketLayout);
- final Table<String, OmKeyInfo> tsKeyTable = toSnapshot
- .getMetadataManager().getKeyTable(bucketLayout);
+ final Table<String, OmKeyInfo> fsKeyTable =
+ fromSnapshot.getMetadataManager().getKeyTable(bucketLayout);
+ final Table<String, OmKeyInfo> tsKeyTable =
+ toSnapshot.getMetadataManager().getKeyTable(bucketLayout);
+
+ boolean useFullDiff = configuration.getBoolean(
+ OzoneConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF,
+ OzoneConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT);
+
+ Map<String, String> tablePrefixes =
+ getTablePrefixes(toSnapshot.getMetadataManager(), volume, bucket);
+
final Set<String> deltaFilesForKeyOrFileTable =
getDeltaFiles(fromSnapshot, toSnapshot,
- Collections.singletonList(fsKeyTable.getName()),
- fsInfo, tsInfo, volume, bucket);
+ Collections.singletonList(fsKeyTable.getName()), fsInfo, tsInfo,
+ useFullDiff, tablePrefixes);
addToObjectIdMap(fsKeyTable,
tsKeyTable,
@@ -212,7 +229,7 @@ public SnapshotDiffReport getSnapshotDiffReport(final String volume,
objectIdToKeyNameMapForFromSnapshot,
objectIdToKeyNameMapForToSnapshot,
objectIDsToCheckMap,
- false);
+ tablePrefixes);
if (bucketLayout.isFileSystemOptimized()) {
// add to object ID map for directory.
@@ -222,16 +239,15 @@ public SnapshotDiffReport getSnapshotDiffReport(final String volume,
toSnapshot.getMetadataManager().getDirectoryTable();
final Set<String> deltaFilesForDirTable =
getDeltaFiles(fromSnapshot, toSnapshot,
- Collections.singletonList(fsDirTable.getName()),
- fsInfo, tsInfo, volume, bucket);
-
+ Collections.singletonList(fsDirTable.getName()), fsInfo, tsInfo,
+ useFullDiff, tablePrefixes);
addToObjectIdMap(fsDirTable,
tsDirTable,
deltaFilesForDirTable,
objectIdToKeyNameMapForFromSnapshot,
objectIdToKeyNameMapForToSnapshot,
objectIDsToCheckMap,
- true);
+ tablePrefixes);
}
generateDiffReport(requestId,
@@ -281,7 +297,11 @@ private void addToObjectIdMap(Table<String, ? extends WithObjectID> fsTable,
PersistentMap<Long, String> oldObjIdToKeyMap,
PersistentMap<Long, String> newObjIdToKeyMap,
PersistentSet<Long> objectIDsToCheck,
- boolean isDirectoryTable) {
+ Map<String, String> tablePrefixes)
+ throws IOException {
+
+ boolean isDirectoryTable =
+ fsTable.getName().equals(OmMetadataManagerImpl.DIRECTORY_TABLE);
if (deltaFiles.isEmpty()) {
return;
@@ -292,7 +312,8 @@ private void addToObjectIdMap(Table<String, ? extends WithObjectID> fsTable,
try {
final WithObjectID oldKey = fsTable.get(key);
final WithObjectID newKey = tsTable.get(key);
- if (areKeysEqual(oldKey, newKey)) {
+ if (areKeysEqual(oldKey, newKey) || !isKeyInBucket(key, tablePrefixes,
+ fsTable.getName())) {
// We don't have to do anything.
return;
}
@@ -333,14 +354,16 @@ private String getKeyOrDirectoryName(boolean isDirectory,
private Set<String> getDeltaFiles(OmSnapshot fromSnapshot,
OmSnapshot toSnapshot, List<String> tablesToLookUp,
SnapshotInfo fsInfo, SnapshotInfo tsInfo,
- String volume, String bucket)
+ boolean useFullDiff, Map<String, String> tablePrefixes)
throws RocksDBException, IOException {
// TODO: Refactor the parameter list
final Set<String> deltaFiles = new HashSet<>();
// Check if compaction DAG is available, use that if so
- if (differ != null && fsInfo != null && tsInfo != null) {
+ if (differ != null && fsInfo != null && tsInfo != null && !useFullDiff) {
+ String volume = fsInfo.getVolumeName();
+ String bucket = fsInfo.getBucketName();
// Construct DifferSnapshotInfo
final DifferSnapshotInfo fromDSI =
getDSIFromSI(fsInfo, fromSnapshot, volume, bucket);
@@ -385,6 +408,7 @@ private Set<String> getDeltaFiles(OmSnapshot fromSnapshot,
deltaFiles.addAll(fromSnapshotFiles);
deltaFiles.addAll(toSnapshotFiles);
+ RocksDiffUtils.filterRelevantSstFiles(deltaFiles, tablePrefixes);
}
return deltaFiles;
@@ -553,4 +577,25 @@ private boolean areKeysEqual(WithObjectID oldKey, WithObjectID newKey) {
}
return false;
}
+
+
+ /**
+ * check if the given key is in the bucket specified by tablePrefix map.
+ */
+ private boolean isKeyInBucket(String key, Map<String, String> tablePrefixes,
+ String tableName) {
+ String volumeBucketDbPrefix;
+ // In case of FSO - either File/Directory table
+ // the key Prefix would be volumeId/bucketId and
+ // in case of non-fso - volumeName/bucketName
+ if (tableName.equals(
+ OmMetadataManagerImpl.DIRECTORY_TABLE) || tableName.equals(
+ OmMetadataManagerImpl.FILE_TABLE)) {
+ volumeBucketDbPrefix =
+ tablePrefixes.get(OmMetadataManagerImpl.DIRECTORY_TABLE);
+ } else {
+ volumeBucketDbPrefix = tablePrefixes.get(OmMetadataManagerImpl.KEY_TABLE);
+ }
+ return key.startsWith(volumeBucketDbPrefix);
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java
index 3c82a69..47cd36a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains OM Ratis Snapshot related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java
index d663049..9ddfc37 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/package-info.java
@@ -20,4 +20,4 @@
/**
* This package contains OM Upgrade related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
index 9bc393d..3c1f621 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
@@ -19,4 +19,4 @@
/**
* OM protocol buffer translators.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
index 20e747a..926bbad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
@@ -19,4 +19,4 @@
/**
* OM native acl implementation.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/package-info.java
index e5c9d4a..810a7d4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/package-info.java
@@ -19,4 +19,4 @@
/**
* Security related classes.
- */
\ No newline at end of file
+ */
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java
index 432335f..46f2287 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManagerImpl.java
@@ -236,4 +236,4 @@ private void assignUserToTenantInDB(String tenantId, String accessId,
omMetadataManager.getTenantAccessIdTable().put(accessId,
new OmDBAccessIdInfo(tenantId, user, isAdmin, isDelegatedAdmin));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index fa97c34..5bd4cde 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -773,4 +773,4 @@ public void testListSnapshotDoesNotListOtherBucketSnapshots()
assertTrue(snapshotInfo.getName().startsWith(snapshotName2));
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
index 5e8909a..4c75ed9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.server.http.BaseHttpServer;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpConfig.Policy;
@@ -40,6 +41,7 @@
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@@ -55,6 +57,7 @@ public class TestOzoneManagerHttpServer {
private static String sslConfDir;
private static OzoneConfiguration conf;
private static URLConnectionFactory connectionFactory;
+ private static File ozoneMetadataDirectory;
@Parameters public static Collection<Object[]> policy() {
Object[][] params = new Object[][] {
@@ -74,7 +77,12 @@ public TestOzoneManagerHttpServer(Policy policy) {
@BeforeClass public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
- base.mkdirs();
+
+ // Create metadata directory
+ ozoneMetadataDirectory = new File(BASEDIR, "metadata");
+ ozoneMetadataDirectory.mkdirs();
+
+ // Initialize the OzoneConfiguration
conf = new OzoneConfiguration();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(
@@ -86,6 +94,14 @@ public TestOzoneManagerHttpServer(Policy policy) {
KeyStoreTestUtil.getClientSSLConfigFileName());
conf.set(OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
KeyStoreTestUtil.getServerSSLConfigFileName());
+
+ // Set up OM HTTP and HTTPS addresses
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ ozoneMetadataDirectory.getAbsolutePath());
+ conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0");
+ conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0");
+ conf.set(OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY, "localhost");
+ conf.set(OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY, "localhost");
}
@AfterClass public static void tearDown() throws Exception {
@@ -96,11 +112,6 @@ public TestOzoneManagerHttpServer(Policy policy) {
@Test public void testHttpPolicy() throws Exception {
conf.set(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, policy.name());
- conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0");
- conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0");
- conf.set(OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY, "localhost");
- conf.set(OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY, "localhost");
-
OzoneManagerHttpServer server = null;
try {
server = new OzoneManagerHttpServer(conf, null);
@@ -117,7 +128,30 @@ public TestOzoneManagerHttpServer(Policy policy) {
canAccess("https", server.getHttpsAddress())));
Assert.assertTrue(implies(policy.isHttpsEnabled(),
!canAccess("http", server.getHttpsAddress())));
+ } finally {
+ if (server != null) {
+ server.stop();
+ }
+ }
+ }
+ @Test
+ // Verify if jetty-dir will be created inside ozoneMetadataDirectory path
+ public void testJettyDirectoryCreation() throws Exception {
+ OzoneManagerHttpServer server = null;
+ try {
+ server = new OzoneManagerHttpServer(conf, null);
+ DefaultMetricsSystem.initialize("TestOzoneManagerHttpServer");
+ server.start();
+ // Checking if the /webserver directory does get created
+ File webServerDir =
+ new File(ozoneMetadataDirectory, BaseHttpServer.SERVER_DIR);
+ Assert.assertTrue(webServerDir.exists());
+ // Verify that the jetty directory is set correctly
+ String expectedJettyDirLocation =
+ ozoneMetadataDirectory.getAbsolutePath() + BaseHttpServer.SERVER_DIR;
+ Assertions.assertEquals(expectedJettyDirLocation,
+ server.getJettyBaseTmpDir());
} finally {
if (server != null) {
server.stop();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
index 754134d..da9b7dc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java
@@ -178,4 +178,4 @@ public void startAndCancelPrepare(OzoneConfiguration conf)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
index f6b4ed9..9995ef7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
@@ -459,4 +459,4 @@ public void testRangerAclStrings() throws Exception {
// cleanup.
controller.deletePolicy(policy.getName());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
index b89c651..9d34c2d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for bucket requests.
*/
-package org.apache.hadoop.ozone.om.request.bucket;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.bucket;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java
index ab81a7e..4d7f992 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for file requests.
*/
-package org.apache.hadoop.ozone.om.request.file;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.file;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java
index 2034670..7ed51e7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for key requests.
*/
-package org.apache.hadoop.ozone.om.request.key;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.key;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
index fd0a9d3..8ff94bc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
@@ -153,4 +153,4 @@ protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
public BucketLayout getBucketLayout() {
return BucketLayout.FILE_SYSTEM_OPTIMIZED;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/package-info.java
index 4a72a86..d783a7f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for delegation token requests.
*/
-package org.apache.hadoop.ozone.om.request.security;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.security;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
index c029ea3..94e551a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java
@@ -23,6 +23,8 @@
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditMessage;
@@ -32,8 +34,10 @@
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager;
import org.apache.ozone.test.LambdaTestUtils;
import org.junit.After;
@@ -67,6 +71,7 @@ public class TestOMSnapshotCreateRequest {
private OzoneManager ozoneManager;
private OMMetrics omMetrics;
private OMMetadataManager omMetadataManager;
+ private BatchOperation batchOperation;
private String volumeName;
private String bucketName;
@@ -98,6 +103,7 @@ public void setup() throws Exception {
AuditLogger auditLogger = mock(AuditLogger.class);
when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+ batchOperation = omMetadataManager.getStore().initBatchOperation();
volumeName = UUID.randomUUID().toString();
bucketName = UUID.randomUUID().toString();
@@ -111,6 +117,9 @@ public void setup() throws Exception {
public void stop() {
omMetrics.unRegister();
Mockito.framework().clearInlineMocks();
+ if (batchOperation != null) {
+ batchOperation.close();
+ }
}
@Test
@@ -239,6 +248,50 @@ public void testValidateAndUpdateCache() throws Exception {
}
@Test
+ public void testEmptyRenamedKeyTable() throws Exception {
+ when(ozoneManager.isAdmin(any())).thenReturn(true);
+ OmKeyInfo toKeyInfo = addKey("key1");
+ OmKeyInfo fromKeyInfo = addKey("key2");
+
+ OMResponse omResponse =
+ OMResponse.newBuilder().setRenameKeyResponse(
+ OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance())
+ .setStatus(OzoneManagerProtocolProtos.Status.OK)
+ .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+ .build();
+ OMKeyRenameResponse omKeyRenameResponse =
+ new OMKeyRenameResponse(omResponse, fromKeyInfo.getKeyName(),
+ toKeyInfo.getKeyName(), toKeyInfo);
+
+ Assert.assertTrue(omMetadataManager.getRenamedKeyTable().isEmpty());
+ omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation);
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+ Assert.assertFalse(omMetadataManager.getRenamedKeyTable().isEmpty());
+
+ OMRequest omRequest =
+ OMRequestTestUtils.createSnapshotRequest(
+ volumeName, bucketName, snapshotName);
+ OMSnapshotCreateRequest omSnapshotCreateRequest = doPreExecute(omRequest);
+ String key = SnapshotInfo.getTableKey(volumeName,
+ bucketName, snapshotName);
+
+ Assert.assertNull(omMetadataManager.getSnapshotInfoTable().get(key));
+
+ //create entry
+ OMClientResponse omClientResponse =
+ omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1,
+ ozoneManagerDoubleBufferHelper);
+ omClientResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+ SnapshotInfo snapshotInfo =
+ omMetadataManager.getSnapshotInfoTable().get(key);
+ Assert.assertNotNull(snapshotInfo);
+ Assert.assertTrue(omMetadataManager.getRenamedKeyTable().isEmpty());
+
+ }
+
+ @Test
public void testEntryExists() throws Exception {
when(ozoneManager.isAdmin(any())).thenReturn(true);
OMRequest omRequest =
@@ -290,4 +343,16 @@ static OMSnapshotCreateRequest doPreExecute(
return new OMSnapshotCreateRequest(modifiedRequest);
}
+ private OmKeyInfo addKey(String keyName) {
+ return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+ HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, 0L);
+ }
+
+ protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
+ OMRequestTestUtils.addKeyToTable(false, false, keyInfo, 0, 0L,
+ omMetadataManager);
+ return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(),
+ keyInfo.getBucketName(), keyInfo.getKeyName());
+ }
+
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java
index d20b493..28b94cb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for snapshot requests.
*/
-package org.apache.hadoop.ozone.om.request.snapshot;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.snapshot;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
index 1552af7..6d87d3f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java
@@ -18,4 +18,4 @@
/**
* Package contains test classes for volume acl requests.
*/
-package org.apache.hadoop.ozone.om.request.volume.acl;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.volume.acl;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
index cbe3e2d..c5b43da 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java
@@ -18,4 +18,4 @@
/**
* Package contains test classes for volume requests.
*/
-package org.apache.hadoop.ozone.om.request.volume;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.request.volume;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
index ab858e3..7097532 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
@@ -385,4 +385,4 @@ private HddsProtos.PipelineID aPipelineID() {
.setId(UUID.randomUUID().toString())
.build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
index 0980106..efab379 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for bucket responses.
*/
-package org.apache.hadoop.ozone.om.response.bucket;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.bucket;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java
index 4c6c005..7902412 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for file responses.
*/
-package org.apache.hadoop.ozone.om.response.file;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.file;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
index ecd80c8..1e12941 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.junit.Assert;
import org.junit.Test;
@@ -52,7 +53,7 @@ public void testAddToDBBatch() throws Exception {
String toKeyName = UUID.randomUUID().toString();
OmKeyInfo toKeyInfo = getOmKeyInfo(toKeyName);
- OmKeyInfo fromKeyInfo = getOmKeyInfo(keyName);
+ OmKeyInfo fromKeyInfo = getOmKeyInfo(toKeyInfo, keyName);
String dbFromKey = addKeyToTable(fromKeyInfo);
String dbToKey = getDBKeyName(toKeyInfo);
@@ -63,6 +64,7 @@ public void testAddToDBBatch() throws Exception {
.isExist(dbFromKey));
Assert.assertFalse(omMetadataManager.getKeyTable(getBucketLayout())
.isExist(dbToKey));
+ Assert.assertTrue(omMetadataManager.getRenamedKeyTable().isEmpty());
if (getBucketLayout() == BucketLayout.FILE_SYSTEM_OPTIMIZED) {
Assert.assertFalse(omMetadataManager.getDirectoryTable()
.isExist(getDBKeyName(fromKeyParent)));
@@ -81,6 +83,17 @@ public void testAddToDBBatch() throws Exception {
.isExist(dbFromKey));
Assert.assertTrue(omMetadataManager.getKeyTable(getBucketLayout())
.isExist(dbToKey));
+
+ String renameDbKey = omMetadataManager.getRenameKey(
+ fromKeyInfo.getVolumeName(), fromKeyInfo.getBucketName(),
+ fromKeyInfo.getObjectID());
+ Assert.assertTrue(omMetadataManager.getRenamedKeyTable()
+ .isExist(renameDbKey));
+
+ OmKeyRenameInfo omKeyRenameInfo =
+ omMetadataManager.getRenamedKeyTable().get(renameDbKey);
+ Assert.assertTrue(omKeyRenameInfo.getOmKeyRenameInfoList()
+ .contains(dbFromKey));
if (getBucketLayout() == BucketLayout.FILE_SYSTEM_OPTIMIZED) {
Assert.assertTrue(omMetadataManager.getDirectoryTable()
.isExist(getDBKeyName(fromKeyParent)));
@@ -104,7 +117,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
String toKeyName = UUID.randomUUID().toString();
OmKeyInfo toKeyInfo = getOmKeyInfo(toKeyName);
- OmKeyInfo fromKeyInfo = getOmKeyInfo(keyName);
+ OmKeyInfo fromKeyInfo = getOmKeyInfo(toKeyInfo, keyName);
String dbFromKey = addKeyToTable(fromKeyInfo);
String dbToKey = getDBKeyName(toKeyInfo);
@@ -145,6 +158,11 @@ protected OmKeyInfo getOmKeyInfo(String keyName) {
replicationType, replicationFactor, 0L);
}
+ protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo,
+ String keyName) {
+ return getOmKeyInfo(keyName);
+ }
+
protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
OMRequestTestUtils.addKeyToTable(false, false, keyInfo, clientID, 0L,
omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
index 6dcf38d..f2f9cca 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
@@ -45,6 +45,15 @@ protected OmKeyInfo getOmKeyInfo(String keyName) {
}
@Override
+ protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo,
+ String keyName) {
+ return OMRequestTestUtils.createOmKeyInfo(toKeyInfo.getVolumeName(),
+ toKeyInfo.getBucketName(), keyName, replicationType,
+ replicationFactor, toKeyInfo.getObjectID(),
+ toKeyInfo.getParentObjectID(), 0L, toKeyInfo.getCreationTime());
+ }
+
+ @Override
protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
OMRequestTestUtils.addFileToKeyTable(false, false,
keyInfo.getFileName(), keyInfo, clientID, txnLogId, omMetadataManager);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java
index 1ebf4c2..3f3e5f8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for key responses.
*/
-package org.apache.hadoop.ozone.om.response.key;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.key;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/package-info.java
index b8c34e1..0d698b5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/package-info.java
@@ -20,4 +20,4 @@
/**
* Package contains test classes for snapshot responses.
*/
-package org.apache.hadoop.ozone.om.response.snapshot;
\ No newline at end of file
+package org.apache.hadoop.ozone.om.response.snapshot;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
index 9bfa574..540eddf 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
@@ -445,4 +445,4 @@ private OmKeyArgs createAndCommitKey(KeyManager keyManager, String volumeName,
writeClient.commitKey(keyArg, session.getId());
return keyArg;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
index 4d1a30c..f823ff3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
@@ -192,4 +192,4 @@ public void execute(OzoneManager arg) {
arg.getVersion();
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
index 142aabb..70274fc 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
@@ -75,4 +75,4 @@ public void testValidateRequest() {
assertTrue(AWSV4AuthValidator.validateRequest(strToSign, signature,
awsAccessKey));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java
index d37af74..e3038d9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java
@@ -159,4 +159,4 @@ private X509Certificate getX509Certificate() throws Exception {
return KeyStoreTestUtil.generateCertificate(
"CN=Test", keyPair, 365, securityConfig.getSignatureAlgo());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java
index 32ae745..3297e3d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java
@@ -248,4 +248,4 @@ public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
LOG.info("Average token sign time with {}({} symmetric key) is {} ns",
hmacAlgorithm, keyLen, duration / testTokenCount);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
index bb81572..a7ce58a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
@@ -346,4 +346,4 @@ public void testTokenPersistence() throws IOException {
Assert.assertEquals("Deserialize Serialized Token should equal.",
idWrite, idRead);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
index 2d34250..4ea3498 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
@@ -550,4 +550,4 @@ private void validateNone(OzoneObj obj, RequestContext.Builder
nativeAuthorizer.checkAccess(obj, builder.setAclRights(a).build()));
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSOutputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSOutputStream.java
index 7cc76fc..689fde7 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSOutputStream.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSOutputStream.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.fs.ozone;
+import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.impl.StoreImplementationUtils;
import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
@@ -44,6 +45,16 @@ public CapableOzoneFSOutputStream(OzoneFSOutputStream outputStream) {
@Override
public boolean hasCapability(String capability) {
OutputStream os = getWrappedOutputStream().getOutputStream();
+
+ if (os instanceof CryptoOutputStream) {
+ OutputStream wrapped = ((CryptoOutputStream) os).getWrappedStream();
+ return hasWrappedCapability(wrapped, capability);
+ }
+ return hasWrappedCapability(os, capability);
+ }
+
+ private static boolean hasWrappedCapability(OutputStream os,
+ String capability) {
if (os instanceof ECKeyOutputStream) {
return false;
} else if (os instanceof KeyOutputStream) {
@@ -55,7 +66,7 @@ public boolean hasCapability(String capability) {
return false;
}
}
- // deal with CryptoOutputStream
+ // this is unexpected. try last resort
return StoreImplementationUtils.hasCapability(os, capability);
}
}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index cb90e5f..993bafa 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -27,4 +27,4 @@
package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
index 51284c2..cc64907 100644
--- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
+++ b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -19,4 +19,4 @@
/**
* Ozone FS Contract tests.
*/
-package org.apache.hadoop.fs.ozone;
\ No newline at end of file
+package org.apache.hadoop.fs.ozone;
diff --git a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index cb90e5f..993bafa 100644
--- a/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
+++ b/hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -27,4 +27,4 @@
package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index cb90e5f..993bafa 100644
--- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
+++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -27,4 +27,4 @@
package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
index cb90e5f..993bafa 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java
@@ -27,4 +27,4 @@
package org.apache.hadoop.fs.ozone;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
\ No newline at end of file
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java
index 2e5cf0f..afa5a82 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java
@@ -19,4 +19,4 @@
/**
* Recon code generation support for entities and daos.
*/
-package org.hadoop.ozone.recon.codegen;
\ No newline at end of file
+package org.hadoop.ozone.recon.codegen;
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java
index 3c701f9..e4d6eb0 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes in this package define the schema for Recon Sql database.
*/
-package org.hadoop.ozone.recon.schema;
\ No newline at end of file
+package org.hadoop.ozone.recon.schema;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
index 162fbc2..008c1f1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
@@ -336,4 +336,4 @@ public OmDirectoryInfo getDirInfo(String[] names) throws IOException {
.setName(names[2])
.build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/package-info.java
index 2f31546..14c64ce 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/package-info.java
@@ -19,4 +19,4 @@
/**
* Classes for handling different entity and bucket types.
*/
-package org.apache.hadoop.ozone.recon.api.handlers;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.api.handlers;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java
index 504cd35..5120d87 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java
@@ -20,4 +20,4 @@
* The classes in this package define api endpoints for Recon.
*/
-package org.apache.hadoop.ozone.recon.api;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.api;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java
index dc8714a..973067f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java
@@ -19,4 +19,4 @@
/**
* Common type definitions for Recon API.
*/
-package org.apache.hadoop.ozone.recon.api.types;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.api.types;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index fdf493f..7e9af7b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -118,4 +118,4 @@ public NSSummary copyObject(NSSummary object) {
copy.setDirName(object.getDirName());
return copy;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java
index 0812d39..0f87a03 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/package-info.java
@@ -19,4 +19,4 @@
/**
* This package defines the codecs for Recon DB tables.
*/
-package org.apache.hadoop.ozone.recon.codec;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.codec;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
index 5b5d4cc..b004abd 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthStatus.java
@@ -124,4 +124,4 @@ private ContainerPlacementStatus getPlacementStatus(
.collect(Collectors.toList());
return policy.validateContainerPlacement(dns, repFactor);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/package-info.java
index 73d52aa..28dc399 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/package-info.java
@@ -19,4 +19,4 @@
/**
* This package defines the persistence interfaces for Recon SQL DB.
*/
-package org.apache.hadoop.ozone.recon.fsck;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.fsck;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java
index a8db882..471dad1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ReconTaskStatusMetrics.java
@@ -80,4 +80,4 @@ public void getMetrics(MetricsCollector collector, boolean all) {
builder.endRecord();
});
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java
index e4f64d9..5cede7e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains application entry point and related classes for Recon.
*/
-package org.apache.hadoop.ozone.recon;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/SqliteDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/SqliteDataSourceProvider.java
index 897f8be..e97238d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/SqliteDataSourceProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/SqliteDataSourceProvider.java
@@ -50,4 +50,4 @@ public DataSource get() {
ds.setUrl(configuration.getJdbcUrl());
return ds;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
index d4fd2a3..3761910 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
@@ -19,4 +19,4 @@
/**
* This package defines the persistence interfaces for Recon SQL DB.
*/
-package org.apache.hadoop.ozone.recon.persistence;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.persistence;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index ef0b83a..4faced0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -138,4 +138,4 @@ public long getLastSequenceNumberFromDB() {
public boolean isOmTablesInitialized() {
return omTablesInitialized;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
index f4b4770..612ffda 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
@@ -19,4 +19,4 @@
/**
* The classes in this package handle OM snapshot recovery and checkpoints.
*/
-package org.apache.hadoop.ozone.recon.recovery;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.recovery;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/package-info.java
index d91bd8b..db30159 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/package-info.java
@@ -19,4 +19,4 @@
/**
* The classes in this package handle OM snapshot recovery and checkpoints.
*/
-package org.apache.hadoop.ozone.recon.scm;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.scm;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
index 86c2629..81b4182 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
@@ -19,4 +19,4 @@
* The classes in this package define the Service Provider implementations for
* Recon. This provides connectivity to underlying Ozone subsystems.
*/
-package org.apache.hadoop.ozone.recon.spi.impl;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.spi.impl;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java
index 894b791..37a26db 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java
@@ -21,4 +21,4 @@
* Recon. The implementations of Spi interfaces provide connectivity to
* underlying Ozone subsystems.
*/
-package org.apache.hadoop.ozone.recon.spi;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.spi;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
index 6607f19..ec1ccd0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
@@ -313,4 +313,4 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
index f74a033..ce75d29 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
@@ -19,4 +19,4 @@
* The classes in this package contains the various scheduled tasks used by
* Recon.
*/
-package org.apache.hadoop.ozone.recon.tasks;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.tasks;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index be85ca8..307c77f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -1247,4 +1247,4 @@ private static SCMNodeStat getMockSCMRootStat() {
return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE,
ROOT_QUOTA - ROOT_DATA_SIZE);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index 31f1bed..74a7968 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -1285,4 +1285,4 @@ private static SCMNodeStat getMockSCMRootStat() {
return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE,
ROOT_QUOTA - ROOT_DATA_SIZE);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java
index faf2658..959d8a0 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java
@@ -18,4 +18,4 @@
/**
* The classes in this package test the Rest API layer of Recon.
*/
-package org.apache.hadoop.ozone.recon.api;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.api;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
index 2f404c7..572e4d7 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
@@ -180,4 +180,4 @@ private Set<ContainerReplica> generateReplicas(ContainerInfo cont,
}
return replicas;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index 51e6f4a..33a2a33 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -396,4 +396,4 @@ private boolean isDnPresent(List<DatanodeDetails> dns) {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java
index d0066a3..434c585 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java
@@ -18,4 +18,4 @@
/**
* Package for recon server tests.
*/
-package org.apache.hadoop.ozone.recon;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
index 63b8505..7df7879 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java
@@ -19,4 +19,4 @@
/**
* End to end tests for persistence classes.
*/
-package org.apache.hadoop.ozone.recon.persistence;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.persistence;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
index b33c927..2f398b4 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java
@@ -201,4 +201,4 @@ private OMMetadataManager getOMMetadataManager() throws IOException {
private BucketLayout getBucketLayout() {
return BucketLayout.DEFAULT;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
index c3b0b34..67e7c33 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java
@@ -18,4 +18,4 @@
/**
* Package for recon server - OM service specific tests.
*/
-package org.apache.hadoop.ozone.recon.recovery;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.recovery;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 5baec5c..dec0b70 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -236,4 +236,4 @@ protected ContainerWithPipeline getTestContainer(long id,
.build();
return new ContainerWithPipeline(containerInfo, pipeline);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index c8abe3c..5b8cc09 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -183,4 +183,4 @@ private LifeCycleState getContainerStateFromReplicaState(
.build();
return crBuilder.addReport(replicaProto).build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index 2cd7211..648e31c 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -235,4 +235,4 @@ private List<Pipeline> getPipelines(int size) {
}
return pipelines;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
index 20d5e92..2dde931 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
@@ -102,4 +102,4 @@ public void testProcessPipelineReport()
.getPipeline(pipelineID);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconDBProvider.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconDBProvider.java
index 3366843..14c094d 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconDBProvider.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconDBProvider.java
@@ -68,4 +68,4 @@ public void testGet() throws Exception {
assertNotNull(reconDBProvider.getDbStore());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java
index 912cb57..3fcc4bc 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java
@@ -102,4 +102,4 @@ public void testGetPipeline() throws IOException {
verify(scmClient, times(1))
.getPipeline(pipelineID);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
index 932c437..3ecef6f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java
@@ -18,4 +18,4 @@
/**
* Package for recon server impl tests.
*/
-package org.apache.hadoop.ozone.recon.spi.impl;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.spi.impl;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
index ec046e3..b959455 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
@@ -285,4 +285,4 @@ private OmKeyInfo buildOmKeyInfo(String volume,
private BucketLayout getBucketLayout() {
return BucketLayout.DEFAULT;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
index 0c892bd..0199cb6 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
@@ -489,4 +489,4 @@ private static BucketLayout getLegacyBucketLayout() {
private static BucketLayout getOBSBucketLayout() {
return BucketLayout.OBJECT_STORE;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
index 332d882..2f81bc3 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java
@@ -737,4 +737,4 @@ private static void initializeNewOmMetadataManager(
private static BucketLayout getBucketLayout() {
return BucketLayout.LEGACY;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
index 5227cd4..d03599f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
@@ -367,4 +367,4 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
private BucketLayout getBucketLayout() {
return BucketLayout.DEFAULT;
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
index 9e1a31a..2911536 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java
@@ -19,4 +19,4 @@
* The classes in this package tests the various scheduled tasks used by
* Recon.
*/
-package org.apache.hadoop.ozone.recon.tasks;
\ No newline at end of file
+package org.apache.hadoop.ozone.recon.tasks;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java
index dd916e8..f13dd69 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java
@@ -26,4 +26,4 @@
xmlns = {
@javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws"
+ ".com/doc/2006-03-01/", prefix = "")})
-package org.apache.hadoop.ozone.s3.commontypes;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3.commontypes;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
index b978751..05388d6 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java
@@ -445,4 +445,4 @@ public long getHeadKeyFailure() {
public long getListS3BucketsFailure() {
return listS3BucketsFailure.value();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/package-info.java
index 3620462..c6983d9 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/package-info.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains Ozone S3 Metrics.
*/
-package org.apache.hadoop.ozone.s3.metrics;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3.metrics;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java
index 9d41551..e1cd434 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains the top level generic classes of s3 gateway.
*/
-package org.apache.hadoop.ozone.s3;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/package-info.java
index 63f18a6..1ebd14a 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/package-info.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains Ozone S3 Authorization header.
*/
-package org.apache.hadoop.ozone.s3.signature;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3.signature;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
index af93f08..decaf6e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
@@ -19,4 +19,4 @@
/**
* This package contains Ozone S3 Util classes.
*/
-package org.apache.hadoop.ozone.s3.util;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3.util;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java
index 10e4274..e5cd51b 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java
@@ -18,4 +18,4 @@
/**
* In-memory OzoneClient implementation to test REST endpoints.
*/
-package org.apache.hadoop.ozone.client;
\ No newline at end of file
+package org.apache.hadoop.ozone.client;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java
index 071d093..bfe80b4 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java
@@ -115,4 +115,4 @@ private InputStream fileContent(String content) {
return new SignedChunksInputStream(
new ByteArrayInputStream(content.getBytes(UTF_8)));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java
index 7c5bfad..4576332 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java
@@ -35,4 +35,4 @@ public void serialize() throws JAXBException {
context.createMarshaller().marshal(new ListObjectResponse(), System.out);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
index b3ccfd7..0543c45 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
@@ -74,4 +74,4 @@ private MultiDeleteRequest unmarshall(ByteArrayInputStream inputBody)
return new MultiDeleteRequestUnmarshaller()
.readFrom(null, null, null, null, null, inputBody);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
index 070c827..f784dd0 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
@@ -50,4 +50,4 @@ public void parseSourceHeaderWithPrefix() throws OS3Exception {
Assert.assertEquals("key1", bucketKey.getRight());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
index d320041..6ad1f3c 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
@@ -18,4 +18,4 @@
/**
* Unit tests for the rest endpoint implementations.
*/
-package org.apache.hadoop.ozone.s3.endpoint;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3.endpoint;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java
index 31effe4..e366795 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java
@@ -19,4 +19,4 @@
/**
* This package tests OS3Exception.
*/
-package org.apache.hadoop.ozone.s3.exception;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3.exception;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
index daae572..f81b89b 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
@@ -602,4 +602,4 @@ private String initiateMultipartUpload(String bktName, String key)
}
return "Invalid-Id";
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java
index e7e04ab..64519a4 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java
@@ -18,4 +18,4 @@
/**
* Unit tests for the bucket related rest endpoints.
*/
-package org.apache.hadoop.ozone.s3;
\ No newline at end of file
+package org.apache.hadoop.ozone.s3;
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java
index e0b7c10..43eafaf 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/signature/TestStringToSignProducer.java
@@ -85,4 +85,4 @@ public void validateDateRange(Credential credentialObj) {
signatureBase);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java
index 05f9e79..6c96a63 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java
@@ -67,4 +67,4 @@ public void encodeDecodeNullDir() throws OS3Exception {
Assert.assertEquals(ct, parsedToken);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java
index 7576025..18e8366 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java
@@ -41,4 +41,4 @@ public void parse() {
Assert.assertEquals("Mon, 05 Nov 2018 15:04:05 GMT", formatted);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/package-info.java
index ad81bb4..48eb2b6 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/package-info.java
@@ -21,4 +21,4 @@
/**
* Ozone Namespace CLI tools.
*/
-package org.apache.hadoop.ozone.admin.nssummary;
\ No newline at end of file
+package org.apache.hadoop.ozone.admin.nssummary;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java
index bd46439..15cebdb 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java
@@ -87,4 +87,4 @@ private void printReconfigurationStatus(ReconfigurationTaskStatus status) {
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java
new file mode 100644
index 0000000..3473cd8
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import org.apache.hadoop.hdds.cli.GenericCli;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import picocli.CommandLine;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Subcommand to group deleted blocks transaction related operations.
+ */
+@CommandLine.Command(
+ name = "deletedBlocksTxn",
+ description = "SCM deleted blocks transaction specific operations",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class,
+ subcommands = {
+ GetFailedDeletedBlocksTxnSubcommand.class,
+ ResetDeletedBlockRetryCountSubcommand.class,
+ })
+public class DeletedBlocksTxnCommands implements Callable<Void> {
+
+ @CommandLine.Spec
+ private CommandLine.Model.CommandSpec spec;
+
+ @Override
+ public Void call() throws Exception {
+ GenericCli.missingSubcommand(spec);
+ return null;
+ }
+}
+
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizationScmStatusSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizationScmStatusSubcommand.java
index cf320a5..bf9d6fe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizationScmStatusSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizationScmStatusSubcommand.java
@@ -48,4 +48,4 @@ public void execute(ScmClient client) throws IOException {
client.queryUpgradeFinalizationProgress(upgradeClientID, false, true);
System.out.println(progress.status());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
new file mode 100644
index 0000000..cad6c6c
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.common.helpers.DeletedBlocksTransactionInfoWrapper;
+import org.apache.hadoop.hdds.server.JsonUtils;
+import picocli.CommandLine;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * Handler of listing failed DeletedBlocksTransactions from SCM side.
+ */
+@CommandLine.Command(
+ name = "ls",
+ description = "Print the failed DeletedBlocksTransaction(retry count = -1)",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class)
+public class GetFailedDeletedBlocksTxnSubcommand extends ScmSubcommand {
+
+ @CommandLine.ArgGroup(multiplicity = "1")
+ private TransactionsOption group;
+
+ static class TransactionsOption {
+ @CommandLine.Option(names = {"-a", "--all"},
+ description = "Get all the failed transactions.")
+ private boolean getAll;
+
+ @CommandLine.Option(names = {"-c", "--count"},
+ defaultValue = "20",
+ description = "Get at most the count number of the" +
+ " failed transactions.")
+ private int count;
+ }
+
+ @CommandLine.Option(names = {"-s", "--startTxId"},
+ defaultValue = "0",
+ description = "The least transaction ID to start with, default 0." +
+ " Only work with -c/--count")
+ private long startTxId;
+
+ @CommandLine.Option(names = {"-o", "--out"},
+ description = "Print transactions into file in JSON format.")
+ private String fileName;
+
+ private static final int LIST_ALL_FAILED_TRANSACTIONS = -1;
+
+ @Override
+ public void execute(ScmClient client) throws IOException {
+ List<DeletedBlocksTransactionInfo> response;
+ int count = group.getAll ? LIST_ALL_FAILED_TRANSACTIONS : group.count;
+ response = client.getFailedDeletedBlockTxn(count, startTxId);
+ List<DeletedBlocksTransactionInfoWrapper> txns = response.stream()
+ .map(DeletedBlocksTransactionInfoWrapper::fromProtobuf)
+ .filter(Objects::nonNull)
+ .collect(Collectors.toList());
+
+ String result = JsonUtils.toJsonStringWithDefaultPrettyPrinter(txns);
+ if (fileName != null) {
+ try (FileOutputStream f = new FileOutputStream(fileName)) {
+ f.write(result.getBytes(StandardCharsets.UTF_8));
+ }
+ } else {
+ System.out.println(result);
+ }
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java
index 7c91359..47a0ec2 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java
@@ -16,22 +16,32 @@
*/
package org.apache.hadoop.ozone.admin.scm;
+import com.google.gson.Gson;
+import com.google.gson.JsonIOException;
+import com.google.gson.JsonSyntaxException;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.common.helpers.DeletedBlocksTransactionInfoWrapper;
import picocli.CommandLine;
+import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
-
+import java.util.stream.Collectors;
/**
- * Handler of the expired deleted blocks from SCM side.
+ * Handler of resetting expired deleted blocks from SCM side.
*/
@CommandLine.Command(
- name = "resetDeletedBlockRetryCount",
- description = "Reset deleted block transactions whose retry count is -1",
+ name = "reset",
+ description = "Reset the retry count of failed DeletedBlocksTransaction",
mixinStandardHelpOptions = true,
versionProvider = HddsVersionProvider.class)
public class ResetDeletedBlockRetryCountSubcommand extends ScmSubcommand {
@@ -41,25 +51,52 @@ public class ResetDeletedBlockRetryCountSubcommand extends ScmSubcommand {
static class TransactionsOption {
@CommandLine.Option(names = {"-a", "--all"},
- description = "reset all expired deleted block transaction retry" +
+ description = "Reset all expired deleted block transaction retry" +
" count from -1 to 0.")
private boolean resetAll;
@CommandLine.Option(names = {"-l", "--list"},
split = ",",
- description = "reset the only given deletedBlock transaction ID" +
- " list, e.g 100,101,102.(Separated by ',')")
+ paramLabel = "txID",
+ description = "Reset the only given deletedBlock transaction ID" +
+ " list. Example: 100,101,102.(Separated by ',')")
private List<Long> txList;
- }
- @CommandLine.ParentCommand
- private ScmAdmin parent;
+ @CommandLine.Option(names = {"-i", "--in"},
+ description = "Use file as input, need to be JSON Array format and " +
+ "contains multi \"txID\" key. Example: [{\"txID\":1},{\"txID\":2}]")
+ private String fileName;
+ }
@Override
public void execute(ScmClient client) throws IOException {
int count;
if (group.resetAll) {
count = client.resetDeletedBlockRetryCount(new ArrayList<>());
+ } else if (group.fileName != null) {
+ Gson gson = new Gson();
+ List<Long> txIDs;
+ try (InputStream in = new FileInputStream(group.fileName);
+ Reader fileReader = new InputStreamReader(in,
+ StandardCharsets.UTF_8)) {
+ DeletedBlocksTransactionInfoWrapper[] txns = gson.fromJson(fileReader,
+ DeletedBlocksTransactionInfoWrapper[].class);
+ txIDs = Arrays.stream(txns)
+ .map(DeletedBlocksTransactionInfoWrapper::getTxID)
+ .sorted()
+ .distinct()
+ .collect(Collectors.toList());
+ System.out.println("Num of loaded txIDs: " + txIDs.size());
+ if (!txIDs.isEmpty()) {
+ System.out.println("The first loaded txID: " + txIDs.get(0));
+ System.out.println("The last loaded txID: " +
+ txIDs.get(txIDs.size() - 1));
+ }
+ } catch (JsonIOException | JsonSyntaxException | IOException ex) {
+ System.out.println("Cannot parse the file " + group.fileName);
+ throw new IOException(ex);
+ }
+ count = client.resetDeletedBlockRetryCount(txIDs);
} else {
if (group.txList == null || group.txList.isEmpty()) {
System.out.println("TransactionId list should not be empty");
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
index 34cf3c7..a7f96de 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
@@ -38,8 +38,8 @@
GetScmRatisRolesSubcommand.class,
FinalizeScmUpgradeSubcommand.class,
FinalizationScmStatusSubcommand.class,
- ResetDeletedBlockRetryCountSubcommand.class,
- TransferScmLeaderSubCommand.class
+ TransferScmLeaderSubCommand.class,
+ DeletedBlocksTxnCommands.class
})
@MetaInfServices(SubcommandWithParent.class)
public class ScmAdmin extends GenericCli implements SubcommandWithParent {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
index ba26a10..fbfc293 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
@@ -19,4 +19,4 @@
/**
* Contains all of the datanode container replica related commands.
*/
-package org.apache.hadoop.ozone.debug.container;
\ No newline at end of file
+package org.apache.hadoop.ozone.debug.container;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
index 86c9a95..cc450aa 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
@@ -185,4 +185,4 @@ private void printProgressBar(PrintStream stream, long value) {
sb.append(" Time: ").append(timeToPrint);
stream.print(sb.toString());
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index 38a9607..ee5ac61 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -450,7 +450,7 @@ private void doCleanObjects() throws InterruptedException {
*
* @param out PrintStream
*/
- private void printStats(PrintStream out) {
+ void printStats(PrintStream out) {
long endTime = System.nanoTime() - startTime;
String execTime = DurationFormatUtils
.formatDuration(TimeUnit.NANOSECONDS.toMillis(endTime),
@@ -484,8 +484,9 @@ private void printStats(PrintStream out) {
out.println("Number of Volumes created: " + numberOfVolumesCreated);
out.println("Number of Buckets created: " + numberOfBucketsCreated);
out.println("Number of Keys added: " + numberOfKeysAdded);
- out.println("Replication: " + replicationConfig.getReplication());
- out.println("Replication type: " + replicationConfig.getReplicationType());
+ if (replicationConfig != null) {
+ out.println("Replication: " + replicationConfig);
+ }
out.println(
"Average Time spent in volume creation: " + prettyAverageVolumeTime);
out.println(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
index 8704584..673b527 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
@@ -907,4 +907,4 @@ private static ContainerReportsProto createContainerReport() {
private static PipelineReportsProto createPipelineReport() {
return PipelineReportsProto.newBuilder().build();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java
index 432d65c..0de014c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java
@@ -41,4 +41,4 @@
*/
-package org.apache.hadoop.ozone.fsck;
\ No newline at end of file
+package org.apache.hadoop.ozone.fsck;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenOption.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenOption.java
index 6d7e857..128ad6d 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenOption.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenOption.java
@@ -77,4 +77,4 @@ public void persistToken(Token<OzoneTokenIdentifier> token)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/package-info.java
index d5cdc9b..bf5c109 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/package-info.java
@@ -20,4 +20,4 @@
/**
* Commands for Ozone volumes.
*/
-package org.apache.hadoop.ozone.shell.volume;
\ No newline at end of file
+package org.apache.hadoop.ozone.shell.volume;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java
index 6ba9faa..a311d30 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/debug/TestDBDefinitionFactory.java
@@ -77,4 +77,4 @@ public void testGetDefinition() {
new OzoneConfiguration());
assertTrue(definition instanceof DatanodeSchemaThreeDBDefinition);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java
index b559a5a..4fd036a 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java
@@ -68,4 +68,4 @@ public void compare(
new HashSet<Integer>(Arrays.asList(expectations)),
GeneratorDatanode.getPlacement(containerId, maxDatanodes, overlap));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
index f7cb075..7588d03 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
@@ -19,4 +19,4 @@
/**
* Freon Ozone Load Generator.
*/
-package org.apache.hadoop.ozone.freon;
\ No newline at end of file
+package org.apache.hadoop.ozone.freon;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java
index 432d65c..0de014c 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java
@@ -41,4 +41,4 @@
*/
-package org.apache.hadoop.ozone.fsck;
\ No newline at end of file
+package org.apache.hadoop.ozone.fsck;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
index 595708c..1db31e1 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
@@ -19,4 +19,4 @@
/**
* OM to SQL Converter. Currently broken.
*/
-package org.apache.hadoop.ozone.om;
\ No newline at end of file
+package org.apache.hadoop.ozone.om;
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
index 1c58a7d..fd66a2a 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
@@ -169,4 +169,4 @@ protected OzoneClient createRpcClientFromServiceId(
}
}
-}
\ No newline at end of file
+}