Reverting previous commit r1128527
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@1128530 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java b/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
index dc516ae..a565f10 100644
--- a/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
+++ b/src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
@@ -84,9 +84,11 @@
*/
public static final String HDFS_URI_SCHEME = "hdfs";
- /**
- * Please see {@link LayoutVersion} on adding new layout version.
- */
- public static final int LAYOUT_VERSION =
- LayoutVersion.getCurrentLayoutVersion();
+ // Version is reflected in the dfs image and edit log files.
+ // Version is reflected in the data storage file.
+ // Versions are negative.
+ // Decrement LAYOUT_VERSION to define a new version.
+ public static final int LAYOUT_VERSION = -35;
+ // Current version:
+ // -35: Adding support for block pools and multiple namenodes
}
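
A note on the comparison convention this revert reinstates: with LayoutVersion
gone, feature checks become raw integer comparisons against negative version
numbers that grow more negative as features are added. "Version X or newer" is
therefore written layoutVersion <= X, and "older than X" is layoutVersion > X.
A minimal stand-alone sketch of the convention (the class and method names are
illustrative, not part of this patch):

    // Illustrative only: layout versions are negative; newer versions are more negative.
    public class LayoutVersionConvention {
      // Federation was introduced at -35, the current version after this revert.
      static boolean supportsFederation(int layoutVersion) {
        return layoutVersion <= -35;
      }
      // This patch phrases the same idea against the last pre-federation trunk
      // version, -30: anything strictly more negative is federation-era.
      static boolean isPostFederation(int layoutVersion) {
        return layoutVersion < -30;
      }
      public static void main(String[] args) {
        System.out.println(supportsFederation(-35)); // true
        System.out.println(supportsFederation(-30)); // false: older on-disk image
        System.out.println(isPostFederation(-31));   // true
      }
    }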
diff --git a/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
deleted file mode 100644
index 489a596..0000000
--- a/src/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * This class tracks changes in the layout version of HDFS.
- *
- * Layout version is changed for the following reasons:
- * <ol>
- * <li>The layout of how namenode or datanode stores information
- * on disk changes.</li>
- * <li>A new operation code is added to the editlog.</li>
- * <li>Modification to the format or content of a record
- * in the editlog or fsimage.</li>
- * </ol>
- * <br>
- * <b>How to update layout version:<br></b>
- * When a change requires new layout version, please add an entry into
- * {@link Feature} with a short enum name, new layout version and description
- * of the change. Please see {@link Feature} for further details.
- * <br>
- */
-@InterfaceAudience.Private
-public class LayoutVersion {
-
- /**
- * Enums for features that change the layout version.
- * <br><br>
- * To add a new layout version:
- * <ul>
- * <li>Define a new enum constant with a short enum name, the new layout version
- * and description of the added feature.</li>
- * <li>When adding a layout version with an ancestor that is not same as
- * its immediate predecessor, use {@link Feature#Feature(int, int, String)}
- * </li>
- * </ul>
- *
- * When the new layout version is added, it becomes the current layout version.
- */
- public static enum Feature {
- NAMESPACE_QUOTA(-16, "Support for namespace quotas"),
- FILE_ACCESS_TIME(-17, "Support for access time on files"),
- DISKSPACE_QUOTA(-18, "Support for disk space quotas"),
- STICKY_BIT(-19, "Support for sticky bits"),
- APPEND_RBW_DIR(-20, "Datanode has \"rbw\" subdirectory for append"),
- ATOMIC_RENAME(-21, "Support for atomic rename"),
- CONCAT(-22, "Support for concat operation"),
- SYMLINKS(-23, "Support for symbolic links"),
- DELEGATION_TOKEN(-24, "Support for delegation tokens for security"),
- FSIMAGE_COMPRESSION(-25, "Support for fsimage compression"),
- FSIMAGE_CHECKSUM(-26, "Support checksum for fsimage"),
- REMOVE_REL13_DISK_LAYOUT_SUPPORT(-27, "Remove support for 0.13 disk layout"),
- EDITS_CHESKUM(-28, "Support checksum for editlog"),
- UNUSED(-29, "Skipped version"),
- FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"),
- RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203"),
- RESERVED_REL20_204(-32, "Reserved for release 0.20.204"),
- RESERVED_REL22(-33, -27, "Reserved for release 0.22"),
- RESERVED_REL23(-34, -30, "Reserved for release 0.23"),
- FEDERATION(-35, "Support for namenode federation");
-
- final int lv;
- final int ancestorLV;
- final String description;
-
- /**
- * Feature that is added at layout version {@code lv}.
- * @param lv new layout version with the addition of this feature
- * @param description description of the feature
- */
- private Feature(final int lv, final String description) {
- this(lv, lv + 1, description);
- }
-
- /**
- * Feature that is added at layout version {@code lv}.
- * @param lv new layout version with the addition of this feature
- * @param ancestorLV layout version from which the new lv is derived
- * from.
- * @param description description of the feature
- */
- private Feature(final int lv, final int ancestorLV,
- final String description) {
- this.lv = lv;
- this.ancestorLV = ancestorLV;
- this.description = description;
- }
- }
-
- // Build layout version and corresponding feature matrix
- static final Map<Integer, EnumSet<Feature>> map =
- new HashMap<Integer, EnumSet<Feature>>();
-
- // Static initialization
- static {
- initMap();
- }
-
- /**
- * Initialize the map of a layout version and EnumSet of {@link Feature}s
- * supported.
- */
- private static void initMap() {
- // Go through all the enum constants and build a map of
- // LayoutVersion <-> EnumSet of all supported features in that LayoutVersion
- for (Feature f : Feature.values()) {
- EnumSet<Feature> ancestorSet = map.get(f.ancestorLV);
- if (ancestorSet == null) {
- ancestorSet = EnumSet.noneOf(Feature.class); // Empty enum set
- map.put(f.ancestorLV, ancestorSet);
- }
- EnumSet<Feature> featureSet = EnumSet.copyOf(ancestorSet);
- featureSet.add(f);
- map.put(f.lv, featureSet);
- }
-
- // Special initialization for 0.20.203 and 0.20.204
- // to add Feature#DELEGATION_TOKEN
- specialInit(Feature.RESERVED_REL20_203.lv, Feature.DELEGATION_TOKEN);
- specialInit(Feature.RESERVED_REL20_204.lv, Feature.DELEGATION_TOKEN);
- }
-
- private static void specialInit(int lv, Feature f) {
- EnumSet<Feature> set = map.get(lv);
- set.add(f);
- }
-
- /**
- * Gets formatted string that describes {@link LayoutVersion} information.
- */
- public static String getString() {
- final StringBuilder buf = new StringBuilder();
- buf.append("Feature List:\n");
- for (Feature f : Feature.values()) {
- buf.append(f).append(" introduced in layout version ")
- .append(f.lv).append(" (")
- .append(f.description).append(")\n");
- }
-
- buf.append("\n\nLayoutVersion and supported features:\n");
- for (Feature f : Feature.values()) {
- buf.append(f.lv).append(": ").append(map.get(f.lv))
- .append("\n");
- }
- return buf.toString();
- }
-
- /**
- * Returns true if a given feature is supported in the given layout version
- * @param f Feature
- * @param lv LayoutVersion
- * @return true if {@code f} is supported in layout version {@code lv}
- */
- public static boolean supports(final Feature f, final int lv) {
- final EnumSet<Feature> set = map.get(lv);
- return set != null && set.contains(f);
- }
-
- /**
- * Get the current layout version
- */
- public static int getCurrentLayoutVersion() {
- Feature[] values = Feature.values();
- return values[values.length - 1].lv;
- }
-}
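
The deleted class above answered feature questions through a version-to-EnumSet
map built from each feature's ancestor version, which is what the raw numeric
comparisons in the rest of this patch replace. Against the removed code, a
usage sketch (placed in the same package since the lv field is package-private;
the demo class itself is hypothetical) would have been:

    package org.apache.hadoop.hdfs.protocol;

    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;

    public class LayoutVersionDemo {
      public static void main(String[] args) {
        // RESERVED_REL20_203 (-31) is derived from ancestor -19 and then
        // special-cased with DELEGATION_TOKEN, so it inherits features up to
        // STICKY_BIT (-19) but not later ones like FSIMAGE_COMPRESSION (-25).
        int rel203 = Feature.RESERVED_REL20_203.lv; // -31
        System.out.println(LayoutVersion.supports(Feature.STICKY_BIT, rel203));          // true
        System.out.println(LayoutVersion.supports(Feature.DELEGATION_TOKEN, rel203));    // true
        System.out.println(LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, rel203)); // false
      }
    }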
diff --git a/src/java/org/apache/hadoop/hdfs/server/common/Storage.java b/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 257beae..ead9b72 100644
--- a/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -33,8 +33,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.fs.FileUtil;
@@ -77,6 +75,12 @@
* any upgrade code that uses this constant should also be removed. */
public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
+ // last layout version that did not support persistent rbw replicas
+ public static final int PRE_RBW_LAYOUT_VERSION = -19;
+
+ // last layout version that is before federation
+ public static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -30;
+
/** Layout versions of 0.20.203 release */
public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};
@@ -777,8 +781,8 @@
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("storageType", storageType.toString());
props.setProperty("namespaceID", String.valueOf(namespaceID));
- // Set clusterID in version with federation support
- if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ // Set clusterID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+ if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
props.setProperty("clusterID", clusterID);
}
props.setProperty("cTime", String.valueOf(cTime));
@@ -898,8 +902,8 @@
/** Validate and set clusterId from {@link Properties}*/
protected void setClusterId(Properties props, int layoutVersion,
StorageDirectory sd) throws InconsistentFSStateException {
- // Set cluster ID in version that supports federation
- if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ // No Cluster ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+ if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
String cid = getProperty(props, sd, "clusterID");
if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
throw new InconsistentFSStateException(sd.getRoot(),
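
The two Storage.java hunks above gate the clusterID property on the version
number when writing and reading the VERSION file. A hypothetical stand-alone
sketch of that round trip using java.util.Properties (the property names follow
the patch; the class and file handling are simplified assumptions):

    import java.io.*;
    import java.util.Properties;

    public class VersionFileSketch {
      static final int LAST_PRE_FEDERATION_LAYOUT_VERSION = -30;

      static void writeVersionFile(File f, int layoutVersion, String clusterID)
          throws IOException {
        Properties props = new Properties();
        props.setProperty("layoutVersion", String.valueOf(layoutVersion));
        // clusterID is only present in federation-era layouts (newer than -30).
        if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
          props.setProperty("clusterID", clusterID);
        }
        try (OutputStream out = new FileOutputStream(f)) {
          props.store(out, "HDFS VERSION file (sketch)");
        }
      }

      public static void main(String[] args) throws IOException {
        writeVersionFile(new File("VERSION"), -35, "test-cluster"); // writes clusterID
        writeVersionFile(new File("VERSION-old"), -30, "ignored");  // omits clusterID
      }
    }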
diff --git a/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 51ed956..0900213 100644
--- a/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -30,8 +30,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -293,7 +291,7 @@
*/
void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
// Upgrading is applicable only to release with federation or after
- if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ if (!(this.getLayoutVersion() < LAST_PRE_FEDERATION_LAYOUT_VERSION)) {
return;
}
LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
@@ -348,8 +346,8 @@
* @throws IOException if the directory is not empty or it can not be removed
*/
private void cleanupDetachDir(File detachDir) throws IOException {
- if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion)
- && detachDir.exists() && detachDir.isDirectory()) {
+ if (layoutVersion >= PRE_RBW_LAYOUT_VERSION && detachDir.exists()
+ && detachDir.isDirectory()) {
if (detachDir.list().length != 0) {
throw new IOException("Detached directory " + detachDir
diff --git a/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d365f2d..52e7dad 100644
--- a/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -37,6 +37,7 @@
import java.net.UnknownHostException;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
+import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedExceptionAction;
import java.security.SecureRandom;
import java.util.AbstractList;
@@ -1432,11 +1433,11 @@
}
int getPort() {
- return selfAddr == null ? -1 : selfAddr.getPort();
+ return selfAddr.getPort();
}
String getStorageId() {
- return storage == null ? null : storage.getStorageID();
+ return storage.getStorageID();
}
/**
@@ -1449,7 +1450,7 @@
}
public int getIpcPort() {
- return ipcServer == null ? -1 : ipcServer.getListenerAddress().getPort();
+ return ipcServer.getListenerAddress().getPort();
}
/**
diff --git a/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 6af702f..139e921 100644
--- a/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -43,8 +43,6 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
@@ -289,8 +287,8 @@
props.setProperty("cTime", String.valueOf(cTime));
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("storageID", getStorageID());
- // Set NamespaceID in version before federation
- if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ // Set NamespaceID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+ if (layoutVersion >= LAST_PRE_FEDERATION_LAYOUT_VERSION) {
props.setProperty("namespaceID", String.valueOf(namespaceID));
}
}
@@ -307,8 +305,8 @@
setStorageType(props, sd);
setClusterId(props, layoutVersion, sd);
- // Read NamespaceID in version before federation
- if (!LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ // Read NamespaceID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+ if (layoutVersion >= LAST_PRE_FEDERATION_LAYOUT_VERSION) {
setNamespaceID(props, sd);
}
@@ -375,10 +373,8 @@
assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
"Future version is not allowed";
- boolean federationSupported =
- LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
// For pre-federation version - validate the namespaceID
- if (!federationSupported &&
+ if (layoutVersion >= Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION &&
getNamespaceID() != nsInfo.getNamespaceID()) {
throw new IOException("Incompatible namespaceIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
@@ -386,8 +382,8 @@
+ getNamespaceID());
}
- // For version that supports federation, validate clusterID
- if (federationSupported
+ // For post federation version, validate clusterID
+ if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION
&& !getClusterID().equals(nsInfo.getClusterID())) {
throw new IOException("Incompatible clusterIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode clusterID = "
@@ -439,7 +435,7 @@
* @throws IOException on error
*/
void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
- if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ if (layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
clusterID = nsInfo.getClusterID();
layoutVersion = nsInfo.getLayoutVersion();
sd.write();
@@ -497,7 +493,7 @@
* @throws IOException if the directory is not empty or it can not be removed
*/
private void cleanupDetachDir(File detachDir) throws IOException {
- if (!LayoutVersion.supports(Feature.APPEND_RBW_DIR, layoutVersion) &&
+ if (layoutVersion >= PRE_RBW_LAYOUT_VERSION &&
detachDir.exists() && detachDir.isDirectory() ) {
if (detachDir.list().length != 0 ) {
@@ -630,7 +626,7 @@
HardLink hardLink = new HardLink();
// do the link
int diskLayoutVersion = this.getLayoutVersion();
- if (LayoutVersion.supports(Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
+ if (diskLayoutVersion < PRE_RBW_LAYOUT_VERSION) { // RBW version
// hardlink finalized blocks in tmpDir/finalized
linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
diff --git a/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index 904c052..40ae574 100644
--- a/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -28,8 +28,6 @@
import java.util.zip.Checksum;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
@@ -235,7 +233,7 @@
BufferedInputStream bin = new BufferedInputStream(backupInputStream);
DataInputStream in = new DataInputStream(bin);
Checksum checksum = null;
- if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
+ if (logVersion <= -28) { // support fsedits checksum
checksum = FSEditLog.getChecksum();
in = new DataInputStream(new CheckedInputStream(bin, checksum));
}
@@ -363,7 +361,7 @@
FSEditLogLoader logLoader = new FSEditLogLoader(namesystem);
int logVersion = logLoader.readLogVersion(in);
Checksum checksum = null;
- if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
+ if (logVersion <= -28) { // support fsedits checksum
checksum = FSEditLog.getChecksum();
in = new DataInputStream(new CheckedInputStream(bin, checksum));
}
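
Both BackupImage hunks above wrap the edit stream in a
java.util.zip.CheckedInputStream whenever the log version is -28 or newer, so
the checksum accumulates as records are read. A self-contained sketch of the
wrapping pattern (the real code takes its Checksum from FSEditLog.getChecksum();
CRC32 is substituted here as an assumption):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.zip.CRC32;
    import java.util.zip.CheckedInputStream;
    import java.util.zip.Checksum;

    public class EditsChecksumSketch {
      public static void main(String[] args) throws IOException {
        byte[] record = {0, 0, 0, 42};
        Checksum checksum = new CRC32();
        DataInputStream in = new DataInputStream(
            new CheckedInputStream(new ByteArrayInputStream(record), checksum));
        int value = in.readInt(); // every byte read updates the running checksum
        System.out.println(value + " -> checksum " + checksum.getValue());
        // A loader would compare the running value against the checksum stored
        // with the record, then reset() before reading the next record.
      }
    }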
diff --git a/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 4cbca5d..8ff5835 100644
--- a/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -37,8 +37,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -113,7 +111,7 @@
try {
logVersion = readLogVersion(in);
Checksum checksum = null;
- if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
+ if (logVersion <= -28) { // support fsedits checksum
checksum = FSEditLog.getChecksum();
in = new DataInputStream(new CheckedInputStream(bin, checksum));
}
@@ -193,7 +191,7 @@
path = FSImageSerialization.readString(in);
short replication = fsNamesys.adjustReplication(readShort(in));
mtime = readLong(in);
- if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
+ if (logVersion <= -17) {
atime = readLong(in);
}
if (logVersion < -7) {
@@ -279,6 +277,10 @@
break;
}
case OP_CONCAT_DELETE: {
+ if (logVersion > -22) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
numOpConcatDelete++;
int length = in.readInt();
if (length < 3) { // trg, srcs.., timestamp
@@ -337,7 +339,7 @@
// The disk format stores atimes for directories as well.
// However, currently this is not being updated/used because of
// performance reasons.
- if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
+ if (logVersion <= -17) {
atime = readLong(in);
}
@@ -368,6 +370,9 @@
}
case OP_SET_PERMISSIONS: {
numOpSetPerm++;
+ if (logVersion > -11)
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
fsDir.unprotectedSetPermission(
FSImageSerialization.readString(in), FsPermission.read(in));
break;
@@ -383,12 +388,20 @@
break;
}
case OP_SET_NS_QUOTA: {
+ if (logVersion > -16) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
fsDir.unprotectedSetQuota(FSImageSerialization.readString(in),
readLongWritable(in),
FSConstants.QUOTA_DONT_SET);
break;
}
case OP_CLEAR_NS_QUOTA: {
+ if (logVersion > -16) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
fsDir.unprotectedSetQuota(FSImageSerialization.readString(in),
FSConstants.QUOTA_RESET,
FSConstants.QUOTA_DONT_SET);
@@ -431,6 +444,10 @@
break;
}
case OP_RENAME: {
+ if (logVersion > -21) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
numOpRename++;
int length = in.readInt();
if (length != 3) {
@@ -447,6 +464,10 @@
break;
}
case OP_GET_DELEGATION_TOKEN: {
+ if (logVersion > -24) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
numOpGetDelegationToken++;
DelegationTokenIdentifier delegationTokenId =
new DelegationTokenIdentifier();
@@ -457,6 +478,10 @@
break;
}
case OP_RENEW_DELEGATION_TOKEN: {
+ if (logVersion > -24) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
numOpRenewDelegationToken++;
DelegationTokenIdentifier delegationTokenId =
new DelegationTokenIdentifier();
@@ -467,6 +492,10 @@
break;
}
case OP_CANCEL_DELEGATION_TOKEN: {
+ if (logVersion > -24) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
numOpCancelDelegationToken++;
DelegationTokenIdentifier delegationTokenId =
new DelegationTokenIdentifier();
@@ -476,6 +505,10 @@
break;
}
case OP_UPDATE_MASTER_KEY: {
+ if (logVersion > -24) {
+ throw new IOException("Unexpected opCode " + opCode
+ + " for version " + logVersion);
+ }
numOpUpdateMasterKey++;
DelegationKey delegationKey = new DelegationKey();
delegationKey.readFields(in);
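
The FSEditLogLoader hunks above re-add one guard per opcode, each rejecting
opcodes that cannot appear in a log of the given version. Collected in one
place, the rule is a table from opcode to the version that introduced it (the
opcode names and thresholds are taken from the hunks; the table class itself is
our illustration, not Hadoop API):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class OpcodeVersionGuard {
      // Version at which each opcode was introduced (more negative = newer).
      static final Map<String, Integer> INTRODUCED_IN = new HashMap<>();
      static {
        INTRODUCED_IN.put("OP_SET_PERMISSIONS", -11);
        INTRODUCED_IN.put("OP_SET_NS_QUOTA", -16);
        INTRODUCED_IN.put("OP_CLEAR_NS_QUOTA", -16);
        INTRODUCED_IN.put("OP_RENAME", -21);
        INTRODUCED_IN.put("OP_CONCAT_DELETE", -22);
        INTRODUCED_IN.put("OP_GET_DELEGATION_TOKEN", -24);
        INTRODUCED_IN.put("OP_RENEW_DELEGATION_TOKEN", -24);
        INTRODUCED_IN.put("OP_CANCEL_DELEGATION_TOKEN", -24);
        INTRODUCED_IN.put("OP_UPDATE_MASTER_KEY", -24);
      }

      static void check(String opCode, int logVersion) throws IOException {
        Integer introducedIn = INTRODUCED_IN.get(opCode);
        // A log older (less negative) than the introducing version cannot
        // legally contain the opcode.
        if (introducedIn != null && logVersion > introducedIn) {
          throw new IOException("Unexpected opCode " + opCode
              + " for version " + logVersion);
        }
      }

      public static void main(String[] args) throws IOException {
        check("OP_RENAME", -21); // fine
        check("OP_RENAME", -20); // throws: a version -20 log predates OP_RENAME
      }
    }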
diff --git a/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 3590370..db28d83 100644
--- a/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -40,8 +40,6 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -253,13 +251,12 @@
if (!isFormatted && startOpt != StartupOption.ROLLBACK
&& startOpt != StartupOption.IMPORT)
throw new IOException("NameNode is not formatted.");
- int layoutVersion = storage.getLayoutVersion();
- if (layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
+ if (storage.getLayoutVersion() < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
}
if (startOpt != StartupOption.UPGRADE
- && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
- && layoutVersion != FSConstants.LAYOUT_VERSION) {
+ && storage.getLayoutVersion() < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
+ && storage.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
throw new IOException(
"\nFile system image contains an old layout version "
+ storage.getLayoutVersion() + ".\nAn upgrade to version "
@@ -268,12 +265,12 @@
}
// Upgrade to federation requires -upgrade -clusterid <clusterID> option
- if (startOpt == StartupOption.UPGRADE &&
- !LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ if (startOpt == StartupOption.UPGRADE
+ && storage.getLayoutVersion() > Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
if (startOpt.getClusterId() == null) {
throw new IOException(
"\nFile system image contains an old layout version "
- + layoutVersion + ".\nAn upgrade to version "
+ + storage.getLayoutVersion() + ".\nAn upgrade to version "
+ FSConstants.LAYOUT_VERSION
+ " is required.\nPlease restart NameNode with "
+ "-upgrade -clusterid <clusterID> option.");
diff --git a/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index ff487df..5e38f2b 100644
--- a/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -40,8 +40,6 @@
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
@@ -171,7 +169,7 @@
// read compression related info
FSImageCompression compression;
- if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
+ if (imgVersion <= -25) { // -25: 1st version providing compression option
compression = FSImageCompression.readCompressionHeader(conf, in);
} else {
compression = FSImageCompression.createNoopCompression();
@@ -182,8 +180,7 @@
// load all inodes
LOG.info("Number of files = " + numFiles);
- if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
- imgVersion)) {
+ if (imgVersion <= -30) {
loadLocalNameINodes(numFiles, in);
} else {
loadFullNameINodes(numFiles, in);
@@ -232,8 +229,7 @@
*/
private void loadLocalNameINodes(long numFiles, DataInputStream in)
throws IOException {
- assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
- imgVersion);
+ assert imgVersion <= -30; // -30: store only local name in image
assert numFiles > 0;
// load root
@@ -331,7 +327,7 @@
short replication = in.readShort();
replication = namesystem.adjustReplication(replication);
modificationTime = in.readLong();
- if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
+ if (imgVersion <= -17) {
atime = in.readLong();
}
if (imgVersion <= -8) {
@@ -370,19 +366,17 @@
// get quota only when the node is a directory
long nsQuota = -1L;
- if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
- && blocks == null && numBlocks == -1) {
+ if (imgVersion <= -16 && blocks == null && numBlocks == -1) {
nsQuota = in.readLong();
}
long dsQuota = -1L;
- if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
- && blocks == null && numBlocks == -1) {
+ if (imgVersion <= -18 && blocks == null && numBlocks == -1) {
dsQuota = in.readLong();
}
// Read the symlink only when the node is a symlink
String symlink = "";
- if (numBlocks == -2) {
+ if (imgVersion <= -23 && numBlocks == -2) {
symlink = Text.readString(in);
}
@@ -437,7 +431,7 @@
}
private void loadSecretManagerState(DataInputStream in) throws IOException {
- if (!LayoutVersion.supports(Feature.DELEGATION_TOKEN, imgVersion)) {
+ if (imgVersion > -23) {
//SecretManagerState is not available.
//This must not happen if security is turned on.
return;
@@ -447,7 +441,7 @@
private long readNumFiles(DataInputStream in) throws IOException {
- if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
+ if (imgVersion <= -16) {
return in.readLong();
} else {
return in.readInt();
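
FSImageFormat above reads optional INode fields only when the image version is
new enough; the thresholds (-17 access time, -16 namespace quota, -18 disk
space quota, -23 symlinks) all come from the hunks. A minimal sketch of that
version-gated read pattern (a hypothetical reader, not the actual loadINode
signature):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    public class VersionGatedReads {
      static void readOptionalINodeFields(DataInputStream in, int imgVersion)
          throws IOException {
        long atime = 0;
        if (imgVersion <= -17) {   // access times exist from version -17 onward
          atime = in.readLong();
        }
        long nsQuota = -1L;
        if (imgVersion <= -16) {   // namespace quotas from -16 onward
          nsQuota = in.readLong();
        }
        long dsQuota = -1L;
        if (imgVersion <= -18) {   // disk space quotas from -18 onward
          dsQuota = in.readLong();
        }
        System.out.println(atime + " " + nsQuota + " " + dsQuota);
      }

      public static void main(String[] args) throws IOException {
        byte[] threeLongs = new byte[24]; // zeros, enough for all three reads
        readOptionalINodeFields(
            new DataInputStream(new ByteArrayInputStream(threeLongs)), -35);
      }
    }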
diff --git a/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 08682ec..0020c1c 100644
--- a/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -44,8 +44,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.UpgradeManager;
@@ -198,8 +196,8 @@
RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
try {
oldFile.seek(0);
- int oldVersion = oldFile.readInt();
- if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+ int odlVersion = oldFile.readInt();
+ if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
return false;
} finally {
oldFile.close();
@@ -676,8 +674,8 @@
+ sd.getRoot() + " is not formatted.");
}
- // Set Block pool ID in version with federation support
- if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ // No Block pool ID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+ if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
@@ -690,7 +688,7 @@
sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
String sMd5 = props.getProperty(MESSAGE_DIGEST_PROPERTY);
- if (LayoutVersion.supports(Feature.FSIMAGE_CHECKSUM, layoutVersion)) {
+ if (layoutVersion <= -26) {
if (sMd5 == null) {
throw new InconsistentFSStateException(sd.getRoot(),
"file " + STORAGE_FILE_VERSION
@@ -721,8 +719,8 @@
StorageDirectory sd
) throws IOException {
super.setFields(props, sd);
- // Set blockpoolID in version with federation support
- if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+ // Set blockpoolID in version LAST_PRE_FEDERATION_LAYOUT_VERSION or before
+ if (layoutVersion < LAST_PRE_FEDERATION_LAYOUT_VERSION) {
props.setProperty("blockpoolID", blockpoolID);
}
boolean uState = getDistributedUpgradeState();
@@ -1021,7 +1019,7 @@
throw new InconsistentFSStateException(storage,
"Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
}
- setBlockPoolID(bpid);
+ blockpoolID = bpid;
}
public String getBlockPoolID() {
diff --git a/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java b/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
index 4ce5564..829f04a 100644
--- a/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
+++ b/src/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/EditsLoaderCurrent.java
@@ -21,8 +21,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import static org.apache.hadoop.hdfs.tools.offlineEditsViewer.Tokenizer.ByteToken;
@@ -223,6 +221,11 @@
* Visit OP_RENAME
*/
private void visit_OP_RENAME() throws IOException {
+ if(editsVersion > -21) {
+ throw new IOException("Unexpected op code " + FSEditLogOpCodes.OP_RENAME
+ + " for edit log version " + editsVersion
+ + " (op code 15 only expected for 21 and later)");
+ }
v.visitInt( EditsElement.LENGTH);
v.visitStringUTF8( EditsElement.SOURCE);
v.visitStringUTF8( EditsElement.DESTINATION);
@@ -234,6 +237,12 @@
* Visit OP_CONCAT_DELETE
*/
private void visit_OP_CONCAT_DELETE() throws IOException {
+ if(editsVersion > -22) {
+ throw new IOException("Unexpected op code "
+ + FSEditLogOpCodes.OP_CONCAT_DELETE
+ + " for edit log version " + editsVersion
+ + " (op code 16 only expected for 22 and later)");
+ }
IntToken lengthToken = v.visitInt(EditsElement.LENGTH);
v.visitStringUTF8(EditsElement.CONCAT_TARGET);
// all except of CONCAT_TARGET and TIMESTAMP
@@ -267,6 +276,12 @@
* Visit OP_GET_DELEGATION_TOKEN
*/
private void visit_OP_GET_DELEGATION_TOKEN() throws IOException {
+ if(editsVersion > -24) {
+ throw new IOException("Unexpected op code "
+ + FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN
+ + " for edit log version " + editsVersion
+ + " (op code 18 only expected for 24 and later)");
+ }
v.visitByte( EditsElement.T_VERSION);
v.visitStringText( EditsElement.T_OWNER);
v.visitStringText( EditsElement.T_RENEWER);
@@ -283,6 +298,13 @@
*/
private void visit_OP_RENEW_DELEGATION_TOKEN()
throws IOException {
+
+ if(editsVersion > -24) {
+ throw new IOException("Unexpected op code "
+ + FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN
+ + " for edit log version " + editsVersion
+ + " (op code 19 only expected for 24 and later)");
+ }
v.visitByte( EditsElement.T_VERSION);
v.visitStringText( EditsElement.T_OWNER);
v.visitStringText( EditsElement.T_RENEWER);
@@ -299,6 +321,13 @@
*/
private void visit_OP_CANCEL_DELEGATION_TOKEN()
throws IOException {
+
+ if(editsVersion > -24) {
+ throw new IOException("Unexpected op code "
+ + FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN
+ + " for edit log version " + editsVersion
+ + " (op code 20 only expected for 24 and later)");
+ }
v.visitByte( EditsElement.T_VERSION);
v.visitStringText( EditsElement.T_OWNER);
v.visitStringText( EditsElement.T_RENEWER);
@@ -314,6 +343,13 @@
*/
private void visit_OP_UPDATE_MASTER_KEY()
throws IOException {
+
+ if(editsVersion > -24) {
+ throw new IOException("Unexpected op code "
+ + FSEditLogOpCodes.OP_UPDATE_MASTER_KEY
+ + " for edit log version " + editsVersion
+ + "(op code 21 only expected for 24 and later)");
+ }
v.visitVInt( EditsElement.KEY_ID);
v.visitVLong( EditsElement.KEY_EXPIRY_DATE);
VIntToken blobLengthToken = v.visitVInt(EditsElement.KEY_LENGTH);
@@ -418,8 +454,7 @@
v.leaveEnclosingElement(); // DATA
- if (editsOpCode != FSEditLogOpCodes.OP_INVALID &&
- LayoutVersion.supports(Feature.EDITS_CHESKUM, editsVersion)) {
+ if (editsOpCode != FSEditLogOpCodes.OP_INVALID && editsVersion <= -28) {
v.visitInt(EditsElement.CHECKSUM);
}
v.leaveEnclosingElement(); // RECORD
diff --git a/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index bdbd4ab..6a806cc 100644
--- a/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -26,8 +26,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
@@ -157,7 +155,7 @@
v.visit(ImageElement.GENERATION_STAMP, in.readLong());
- if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
+ if (imageVersion <= -25) {
boolean isCompressed = in.readBoolean();
v.visit(ImageElement.IS_COMPRESSED, imageVersion);
if (isCompressed) {
@@ -177,7 +175,7 @@
processINodesUC(in, v, skipBlocks);
- if (LayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
+ if (imageVersion <= -24) {
processDelegationTokens(in, v);
}
@@ -336,7 +334,7 @@
v.visitEnclosingElement(ImageElement.INODES,
ImageElement.NUM_INODES, numInodes);
- if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
+ if (imageVersion <= -30) { // local file name
processLocalNameINodes(in, v, numInodes, skipBlocks);
} else { // full path name
processFullNameINodes(in, v, numInodes, skipBlocks);
@@ -398,6 +396,7 @@
* @param v visitor
* @param skipBlocks skip blocks or not
* @param parentName the name of its parent node
+ * @return the number of Children
* @throws IOException
*/
private void processINode(DataInputStream in, ImageVisitor v,
@@ -414,7 +413,7 @@
v.visit(ImageElement.INODE_PATH, pathName);
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
- if(LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
+ if(imageVersion <= -17) // added in version -17
v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
v.visit(ImageElement.BLOCK_SIZE, in.readLong());
int numBlocks = in.readInt();
@@ -424,10 +423,10 @@
// File or directory
if (numBlocks > 0 || numBlocks == -1) {
v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
- if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
+ if(imageVersion <= -18) // added in version -18
v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
}
- if (numBlocks == -2) {
+ if (imageVersion <= -23 && numBlocks == -2) {
v.visit(ImageElement.SYMLINK, Text.readString(in));
}
diff --git a/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java b/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
index 1d3208d..18cca68 100644
--- a/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
+++ b/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
@@ -47,7 +47,8 @@
"saving the results in OUTPUTFILE.\n" +
"\n" +
"The oiv utility will attempt to parse correctly formed image files\n" +
- "and will abort fail with mal-formed image files.\n" +
+ "and will abort fail with mal-formed image files. Currently the\n" +
+ "supports FSImage layout versions -16 through -23.\n" +
"\n" +
"The tool works offline and does not require a running cluster in\n" +
"order to process an image file.\n" +
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 4df2ac7..a9b8071 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -47,13 +47,10 @@
*/
public class TestDFSUpgradeFromImage extends TestCase {
- private static final Log LOG = LogFactory
- .getLog(TestDFSUpgradeFromImage.class);
+ private static final Log LOG = LogFactory.getLog(
+ "org.apache.hadoop.hdfs.TestDFSUpgradeFromImage");
private static File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
- private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
- private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
- private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
public int numDataNodes = 4;
@@ -67,26 +64,24 @@
boolean printChecksum = false;
- public void unpackStorage() throws IOException {
- unpackStorage(HADOOP14_IMAGE);
+ protected void setUp() throws IOException {
+ unpackStorage();
}
- private void unpackStorage(String tarFileName)
- throws IOException {
- String tarFile = System.getProperty("test.cache.data", "build/test/cache")
- + "/" + tarFileName;
+ public void unpackStorage() throws IOException {
+ String tarFile = System.getProperty("test.cache.data", "build/test/cache") +
+ "/hadoop-14-dfs-dir.tgz";
String dataDir = System.getProperty("test.build.data", "build/test/data");
File dfsDir = new File(dataDir, "dfs");
if ( dfsDir.exists() && !FileUtil.fullyDelete(dfsDir) ) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
- LOG.info("Unpacking " + tarFile);
FileUtil.unTar(new File(tarFile), new File(dataDir));
//Now read the reference info
- BufferedReader reader = new BufferedReader(new FileReader(
- System.getProperty("test.cache.data", "build/test/cache")
- + "/" + HADOOP_DFS_DIR_TXT));
+ BufferedReader reader = new BufferedReader(
+ new FileReader(System.getProperty("test.cache.data", "build/test/cache") +
+ "/hadoop-dfs-dir.txt"));
String line;
while ( (line = reader.readLine()) != null ) {
@@ -182,8 +177,7 @@
}
}
- public void testUpgradeFromRel14Image() throws IOException {
- unpackStorage();
+ public void testUpgradeFromImage() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
@@ -252,43 +246,8 @@
.build();
fail("Was able to start NN from 0.3.0 image");
} catch (IOException ioe) {
+ LOG.info("Got expected exception", ioe);
assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
}
}
-
- /**
- * Test upgrade from 0.22 image
- */
- /* commented for hudson build to run. Will be uncommented once
- * the jar tgz file required for this test is checked in
- public void testUpgradeFromRel22Image() throws IOException {
- unpackStorage(HADOOP_22_IMAGE, null);
- MiniDFSCluster cluster = null;
- try {
- Configuration conf = new HdfsConfiguration();
- if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
- System.setProperty("test.build.data", "build/test/data");
- }
- conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDataNodes)
- .format(false)
- .startupOption(StartupOption.UPGRADE)
- .clusterId("testClusterId")
- .build();
- cluster.waitActive();
- DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
- DFSClient dfsClient = dfs.dfs;
- //Safemode will be off only after upgrade is complete. Wait for it.
- while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
- LOG.info("Waiting for SafeMode to be OFF.");
- try {
- Thread.sleep(1000);
- } catch (InterruptedException ignored) {}
- }
- } finally {
- if (cluster != null) { cluster.shutdown(); }
- }
- }
- */
}
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java b/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
index bb5e18d..b84e4ff 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -34,9 +34,7 @@
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -50,6 +48,7 @@
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.test.GenericTestUtils;
/**
* This class defines a number of static helper methods used by the
@@ -462,7 +461,7 @@
public static void createBlockPoolVersionFile(File bpDir,
StorageInfo version, String bpid) throws IOException {
// Create block pool version files
- if (LayoutVersion.supports(Feature.FEDERATION, version.layoutVersion)) {
+ if (version.layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
bpid);
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz b/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz
deleted file mode 100644
index 68188af..0000000
--- a/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-22-dfs-dir.tgz
+++ /dev/null
Binary files differ
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
deleted file mode 100644
index d329f49..0000000
--- a/src/test/hdfs/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.EnumSet;
-
-import static org.junit.Assert.*;
-import org.junit.Test;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-
-/**
- * Test for {@link LayoutVersion}
- */
-public class TestLayoutVersion {
-
- /**
- * Tests to make sure a given layout version supports all the
- * features from the ancestor
- */
- @Test
- public void testFeaturesFromAncestorSupported() {
- for (Feature f : Feature.values()) {
- validateFeatureList(f);
- }
- }
-
- /**
- * Test to make sure 0.20.203 supports delegation token
- */
- @Test
- public void testRelease203() {
- assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN,
- Feature.RESERVED_REL20_203.lv));
- }
-
- /**
- * Test to make sure 0.20.204 supports delegation token
- */
- @Test
- public void testRelease204() {
- assertTrue(LayoutVersion.supports(Feature.DELEGATION_TOKEN,
- Feature.RESERVED_REL20_204.lv));
- }
-
- /**
- * Given feature {@code f}, ensures the layout version of that feature
- * supports all the features supported by its ancestor.
- */
- private void validateFeatureList(Feature f) {
- int lv = f.lv;
- int ancestorLV = f.ancestorLV;
- EnumSet<Feature> ancestorSet = LayoutVersion.map.get(ancestorLV);
- assertNotNull(ancestorSet);
- for (Feature feature : ancestorSet) {
- assertTrue(LayoutVersion.supports(feature, lv));
- }
- }
-}