Rename StorageArchiver to StoragePurger as suggested by Matt and Ivan in the comments on HDFS-1073
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1151192 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
index 72b4592..874c847 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
@@ -19,7 +19,7 @@
import java.io.IOException;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
/**
@@ -54,7 +54,7 @@
}
@Override
- public void archiveLogsOlderThan(long minTxIdToKeep, StorageArchiver archiver)
+ public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
throws IOException {
}
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 35bfa6d..565d429 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -36,7 +36,7 @@
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -870,24 +870,24 @@
/**
- * Archive any log files that are older than the given txid.
+ * Purge any log files that are older than the given txid.
*/
- public void archiveLogsOlderThan(
- final long minTxIdToKeep, final StorageArchiver archiver) {
+ public void purgeLogsOlderThan(
+ final long minTxIdToKeep, final StoragePurger purger) {
synchronized (this) {
// synchronized to prevent findbugs warning about inconsistent
// synchronization. This will be JIT-ed out if asserts are
// off.
assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op
minTxIdToKeep <= curSegmentTxId :
- "cannot archive logs older than txid " + minTxIdToKeep +
+ "cannot purge logs older than txid " + minTxIdToKeep +
" when current segment starts at " + curSegmentTxId;
}
mapJournalsAndReportErrors(new JournalClosure() {
@Override
public void apply(JournalAndStream jas) throws IOException {
- jas.manager.archiveLogsOlderThan(minTxIdToKeep, archiver);
+ jas.manager.purgeLogsOlderThan(minTxIdToKeep, purger);
}
- }, "archiving logs older than " + minTxIdToKeep);
+ }, "purging logs older than " + minTxIdToKeep);
}
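
The assert above captures the one safety condition for purging: the cutoff may never reach into the edit segment that is currently being written. A minimal standalone sketch of that check follows; the txid values are hypothetical and only illustrate the arithmetic the assert enforces.

    public class PurgeCutoffCheckSketch {
      public static void main(String[] args) {
        long curSegmentTxId = 301; // hypothetical start txid of the in-progress segment
        long okCutoff = 200;       // purgeLogsOlderThan(200, purger) satisfies the assert
        long badCutoff = 350;      // would discard txids 301-349 that are still being written
        System.out.println(okCutoff <= curSegmentTxId);  // true  -> purge may proceed
        System.out.println(badCutoff <= curSegmentTxId); // false -> the assert would fire
      }
    }

The closure passed to mapJournalsAndReportErrors then applies the purge to every configured journal, presumably so that a failure in one storage directory is reported rather than aborting the purge in the others.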
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index a8d5477..717d39f 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -853,18 +853,18 @@
// Since we now have a new checkpoint, we can clean up some
// old edit logs and checkpoints.
- archiveOldStorage();
+ purgeOldStorage();
}
/**
- * Archive any files in the storage directories that are no longer
+ * Purge any files in the storage directories that are no longer
* necessary.
*/
- public void archiveOldStorage() {
+ public void purgeOldStorage() {
try {
- archivalManager.archiveOldStorage();
+ archivalManager.purgeOldStorage();
} catch (Exception e) {
- LOG.warn("Unable to archive old storage", e);
+ LOG.warn("Unable to purge old storage", e);
}
}
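
Note that purging here is best-effort: a failure is caught and logged as a warning rather than propagated, so an otherwise successful checkpoint is not failed by cleanup problems, and the same cleanup is simply attempted again after the next checkpoint.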
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index cd423b9..6ca030b 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -27,7 +27,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -91,7 +91,7 @@
}
@Override
- public void archiveLogsOlderThan(long minTxIdToKeep, StorageArchiver archiver)
+ public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
throws IOException {
File[] files = FileUtil.listFiles(sd.getCurrentDir());
List<FoundEditLog> editLogs =
@@ -99,7 +99,7 @@
for (FoundEditLog log : editLogs) {
if (log.getStartTxId() < minTxIdToKeep &&
log.getLastTxId() < minTxIdToKeep) {
- archiver.archiveLog(log);
+ purger.purgeLog(log);
}
}
}
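
The guard above only purges a segment that lies entirely below the cutoff; a segment that starts before minTxIdToKeep but still contains newer transactions is kept. A minimal standalone sketch of that predicate, with hypothetical txid values:

    public class PurgePredicateSketch {
      // Mirrors the condition used in FileJournalManager.purgeLogsOlderThan above.
      static boolean shouldPurge(long startTxId, long lastTxId, long minTxIdToKeep) {
        return startTxId < minTxIdToKeep && lastTxId < minTxIdToKeep;
      }

      public static void main(String[] args) {
        System.out.println(shouldPurge(1, 100, 101));  // true:  every txid predates the cutoff
        System.out.println(shouldPurge(95, 105, 101)); // false: the segment still holds txids >= 101
      }
    }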
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
index 3a9d9c8..8753b27 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
@@ -150,7 +150,7 @@
// Now that we have a new checkpoint, we might be able to
// remove some old ones.
- nnImage.archiveOldStorage();
+ nnImage.purgeOldStorage();
} finally {
currentlyDownloadingCheckpoints.remove(txid);
}
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index 694ce16..bc274be 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -19,7 +19,7 @@
import java.io.IOException;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
/**
* A JournalManager is responsible for managing a single place of storing
@@ -52,10 +52,10 @@
*
* @param minTxIdToKeep the earliest txid that must be retained after purging
* old logs
- * @param archiver the archival implementation to use
+ * @param purger the purging implementation to use
* @throws IOException if purging fails
*/
- void archiveLogsOlderThan(long minTxIdToKeep, StorageArchiver archiver)
+ void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
throws IOException;
/**
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java
index 0a6cb10..6d2679b 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java
@@ -39,7 +39,7 @@
* directories of the NN and enforcing a retention policy on checkpoints
* and edit logs.
*
- * It delegates the actual removal of files to a StorageArchiver
+ * It delegates the actual removal of files to a StoragePurger
* implementation, which might delete the files or instead copy them to
* a filer or HDFS for later analysis.
*/
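
As the javadoc notes, a purger does not have to delete anything. The sketch below shows what a non-deleting implementation could look like; ArchivingStoragePurger, its archiveDir field, and the renameTo-based move are illustrative assumptions only, and the class is placed in the org.apache.hadoop.hdfs.server.namenode package so that it can see the package-visible StoragePurger, FoundEditLog, and FoundFSImage types used in this change.

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.File;

    import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
    import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
    import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;

    /** Hypothetical purger that moves old files aside for later analysis instead of deleting them. */
    class ArchivingStoragePurger implements StoragePurger {
      private final File archiveDir;

      ArchivingStoragePurger(File archiveDir) {
        this.archiveDir = archiveDir;
      }

      @Override
      public void purgeLog(FoundEditLog log) {
        moveAside(log.getFile());
      }

      @Override
      public void purgeImage(FoundFSImage image) {
        // A fuller version would also move the image's MD5 digest file,
        // mirroring what DeletionStoragePurger deletes below.
        moveAside(image.getFile());
      }

      private void moveAside(File f) {
        if (!f.renameTo(new File(archiveDir, f.getName()))) {
          System.err.println("Unable to archive " + f);
        }
      }
    }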
@@ -48,47 +48,47 @@
private final int numCheckpointsToRetain;
private static final Log LOG = LogFactory.getLog(NNStorageArchivalManager.class);
private final NNStorage storage;
- private final StorageArchiver archiver;
+ private final StoragePurger purger;
private final FSEditLog editLog;
public NNStorageArchivalManager(
Configuration conf,
NNStorage storage,
FSEditLog editLog,
- StorageArchiver archiver) {
+ StoragePurger purger) {
this.numCheckpointsToRetain = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,
DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT);
this.storage = storage;
this.editLog = editLog;
- this.archiver = archiver;
+ this.purger = purger;
}
public NNStorageArchivalManager(Configuration conf, NNStorage storage,
FSEditLog editLog) {
- this(conf, storage, editLog, new DeletionStorageArchiver());
+ this(conf, storage, editLog, new DeletionStoragePurger());
}
- public void archiveOldStorage() throws IOException {
+ public void purgeOldStorage() throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
storage.inspectStorageDirs(inspector);
long minImageTxId = getImageTxIdToRetain(inspector);
- archiveCheckpointsOlderThan(inspector, minImageTxId);
+ purgeCheckpointsOlderThan(inspector, minImageTxId);
// If fsimage_N is the image we want to keep, then we need to keep
// all txns > N. We can remove anything < N+1, since fsimage_N
// reflects the state up to and including N.
- editLog.archiveLogsOlderThan(minImageTxId + 1, archiver);
+ editLog.purgeLogsOlderThan(minImageTxId + 1, purger);
}
- private void archiveCheckpointsOlderThan(
+ private void purgeCheckpointsOlderThan(
FSImageTransactionalStorageInspector inspector,
long minTxId) {
for (FoundFSImage image : inspector.getFoundImages()) {
if (image.getTxId() < minTxId) {
LOG.info("Purging old image " + image);
- archiver.archiveImage(image);
+ purger.purgeImage(image);
}
}
}
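
To make the off-by-one reasoning above concrete with hypothetical numbers: if fsimage_100 is the oldest checkpoint being retained, minImageTxId is 100 and the call becomes purgeLogsOlderThan(101, purger). An edit segment covering txids 1-100 is then eligible for purging, since everything in it is already reflected in fsimage_100, while a segment covering txids 101-200 is kept, because those transactions are needed to roll the retained image forward.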
@@ -120,21 +120,21 @@
}
/**
- * Interface responsible for archiving old checkpoints and edit logs.
+ * Interface responsible for disposing of old checkpoints and edit logs.
*/
- static interface StorageArchiver {
- void archiveLog(FoundEditLog log);
- void archiveImage(FoundFSImage image);
+ static interface StoragePurger {
+ void purgeLog(FoundEditLog log);
+ void purgeImage(FoundFSImage image);
}
- static class DeletionStorageArchiver implements StorageArchiver {
+ static class DeletionStoragePurger implements StoragePurger {
@Override
- public void archiveLog(FoundEditLog log) {
+ public void purgeLog(FoundEditLog log) {
deleteOrWarn(log.getFile());
}
@Override
- public void archiveImage(FoundFSImage image) {
+ public void purgeImage(FoundFSImage image) {
deleteOrWarn(image.getFile());
deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
}
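
deleteOrWarn is not part of this diff; judging from its name and its use in DeletionStoragePurger above, it is presumably a small helper in NNStorageArchivalManager along the lines of the sketch below (an assumption, not the actual implementation).

    private static void deleteOrWarn(File file) {
      if (!file.delete()) {
        // Best-effort: log the failure instead of treating it as fatal.
        LOG.warn("Could not delete " + file);
      }
    }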
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index d1f1a50..014c3eb 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -544,7 +544,7 @@
// Since we've successfully checkpointed, we can remove some old
// image files
- checkpointImage.archiveOldStorage();
+ checkpointImage.purgeOldStorage();
return loadImage;
}
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java
index ff709ff..41dca5b 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java
@@ -91,7 +91,7 @@
getInProgressEditsFileName(3));
doSaveNamespace(nn);
- LOG.info("After second save, image 0 should be archived, " +
+ LOG.info("After second save, image 0 should be purged, " +
"and image 4 should exist in both.");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
@@ -110,21 +110,21 @@
LOG.info("Restoring accessibility of first storage dir");
sd0.setExecutable(true);
- LOG.info("nothing should have been archived in first storage dir");
+ LOG.info("nothing should have been purged in first storage dir");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
assertGlobEquals(cd0, "edits_.*",
getFinalizedEditsFileName(3, 4),
getInProgressEditsFileName(5));
- LOG.info("fsimage_2 should be archived in second storage dir");
+ LOG.info("fsimage_2 should be purged in second storage dir");
assertGlobEquals(cd1, "fsimage_\\d*",
getImageFileName(4), getImageFileName(6));
assertGlobEquals(cd1, "edits_.*",
getFinalizedEditsFileName(5, 6),
getInProgressEditsFileName(7));
- LOG.info("On next save, we should archive logs from the failed dir," +
+ LOG.info("On next save, we should purge logs from the failed dir," +
" but not images, since the image directory is in failed state.");
doSaveNamespace(nn);
assertGlobEquals(cd1, "fsimage_\\d*",
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java
index 2f556a0..a218332 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java
@@ -31,7 +31,7 @@
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StorageArchiver;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@@ -48,11 +48,11 @@
public class TestNNStorageArchivalManager {
/**
* Test the "easy case" where we have more images in the
- * directory than we need to keep. Should archive the
+ * directory than we need to keep. Should purge the
* old ones.
*/
@Test
- public void testArchiveEasyCase() throws IOException {
+ public void testPurgeEasyCase() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
@@ -64,7 +64,7 @@
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(301,400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(401), false);
- // Test that other files don't get archived
+ // Test that other files don't get purged
tc.addLog("/foo1/current/VERSION", false);
runTest(tc);
}
@@ -73,7 +73,7 @@
* Same as above, but across multiple directories
*/
@Test
- public void testArchiveMultipleDirs() throws IOException {
+ public void testPurgeMultipleDirs() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addRoot("/foo2", NameNodeDirType.IMAGE_AND_EDITS);
@@ -93,10 +93,10 @@
/**
* Test that if we have fewer fsimages than the configured
- * retention, we don't archive any of them
+ * retention, we don't purge any of them
*/
@Test
- public void testArchiveLessThanRetention() throws IOException {
+ public void testPurgeLessThanRetention() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), false);
@@ -132,7 +132,7 @@
}
/**
- * Test that old in-progress logs are properly archived
+ * Test that old in-progress logs are properly purged
*/
@Test
public void testOldInProgress() throws IOException {
@@ -166,45 +166,45 @@
private void runTest(TestCaseDescription tc) throws IOException {
Configuration conf = new Configuration();
- StorageArchiver mockArchiver =
- Mockito.mock(NNStorageArchivalManager.StorageArchiver.class);
- ArgumentCaptor<FoundFSImage> imagesArchivedCaptor =
+ StoragePurger mockPurger =
+ Mockito.mock(NNStorageArchivalManager.StoragePurger.class);
+ ArgumentCaptor<FoundFSImage> imagesPurgedCaptor =
ArgumentCaptor.forClass(FoundFSImage.class);
- ArgumentCaptor<FoundEditLog> logsArchivedCaptor =
+ ArgumentCaptor<FoundEditLog> logsPurgedCaptor =
ArgumentCaptor.forClass(FoundEditLog.class);
- // Ask the manager to archive files we don't need any more
+ // Ask the manager to purge files we don't need any more
new NNStorageArchivalManager(conf,
- tc.mockStorage(), tc.mockEditLog(), mockArchiver)
- .archiveOldStorage();
+ tc.mockStorage(), tc.mockEditLog(), mockPurger)
+ .purgeOldStorage();
- // Verify that it asked the archiver to remove the correct files
- Mockito.verify(mockArchiver, Mockito.atLeast(0))
- .archiveImage(imagesArchivedCaptor.capture());
- Mockito.verify(mockArchiver, Mockito.atLeast(0))
- .archiveLog(logsArchivedCaptor.capture());
+ // Verify that it asked the purger to remove the correct files
+ Mockito.verify(mockPurger, Mockito.atLeast(0))
+ .purgeImage(imagesPurgedCaptor.capture());
+ Mockito.verify(mockPurger, Mockito.atLeast(0))
+ .purgeLog(logsPurgedCaptor.capture());
// Check images
- Set<String> archivedPaths = Sets.newHashSet();
- for (FoundFSImage archived : imagesArchivedCaptor.getAllValues()) {
- archivedPaths.add(archived.getFile().toString());
+ Set<String> purgedPaths = Sets.newHashSet();
+ for (FoundFSImage purged : imagesPurgedCaptor.getAllValues()) {
+ purgedPaths.add(purged.getFile().toString());
}
- Assert.assertEquals(Joiner.on(",").join(tc.expectedArchivedImages),
- Joiner.on(",").join(archivedPaths));
+ Assert.assertEquals(Joiner.on(",").join(tc.expectedPurgedImages),
+ Joiner.on(",").join(purgedPaths));
-    // Check images
+    // Check logs
- archivedPaths.clear();
- for (FoundEditLog archived : logsArchivedCaptor.getAllValues()) {
- archivedPaths.add(archived.getFile().toString());
+ purgedPaths.clear();
+ for (FoundEditLog purged : logsPurgedCaptor.getAllValues()) {
+ purgedPaths.add(purged.getFile().toString());
}
- Assert.assertEquals(Joiner.on(",").join(tc.expectedArchivedLogs),
- Joiner.on(",").join(archivedPaths));
+ Assert.assertEquals(Joiner.on(",").join(tc.expectedPurgedLogs),
+ Joiner.on(",").join(purgedPaths));
}
private static class TestCaseDescription {
private Map<String, FakeRoot> dirRoots = Maps.newHashMap();
- private Set<String> expectedArchivedLogs = Sets.newHashSet();
- private Set<String> expectedArchivedImages = Sets.newHashSet();
+ private Set<String> expectedPurgedLogs = Sets.newHashSet();
+ private Set<String> expectedPurgedImages = Sets.newHashSet();
private static class FakeRoot {
NameNodeDirType type;
@@ -234,17 +234,17 @@
}
}
- void addLog(String path, boolean expectArchive) {
+ void addLog(String path, boolean expectPurge) {
addFile(path);
- if (expectArchive) {
- expectedArchivedLogs.add(path);
+ if (expectPurge) {
+ expectedPurgedLogs.add(path);
}
}
- void addImage(String path, boolean expectArchive) {
+ void addImage(String path, boolean expectPurge) {
addFile(path);
- if (expectArchive) {
- expectedArchivedImages.add(path);
+ if (expectPurge) {
+ expectedPurgedImages.add(path);
}
}
@@ -274,15 +274,15 @@
Object[] args = invocation.getArguments();
assert args.length == 2;
long txId = (Long) args[0];
- StorageArchiver archiver = (StorageArchiver) args[1];
+ StoragePurger purger = (StoragePurger) args[1];
for (JournalManager jm : jms) {
- jm.archiveLogsOlderThan(txId, archiver);
+ jm.purgeLogsOlderThan(txId, purger);
}
return null;
}
- }).when(mockLog).archiveLogsOlderThan(
- Mockito.anyLong(), (StorageArchiver) Mockito.anyObject());
+ }).when(mockLog).purgeLogsOlderThan(
+ Mockito.anyLong(), (StoragePurger) Mockito.anyObject());
return mockLog;
}
}