HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.
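
Client-facing surface after this cleanup: the INTERNAL satisfier mode, the isInternalSatisfierRunning
and checkStoragePolicySatisfyPathStatus RPCs, the DropSPSWorkCommand/BlockStorageMovementCommand
handling, and the datanode-side StoragePolicySatisfyWorker are removed, while satisfyStoragePolicy()
itself remains. A minimal usage sketch follows; it is illustrative only and not part of this patch,
and it assumes the existing public DistributedFileSystem#setStoragePolicy and #satisfyStoragePolicy
APIs plus a hypothetical /data/cold path:

    // Illustrative sketch only; not part of this patch.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SpsUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
          Path dir = new Path("/data/cold");   // hypothetical path
          dfs.setStoragePolicy(dir, "COLD");   // choose the target storage policy
          dfs.satisfyStoragePolicy(dir);       // queue the path; block moves run asynchronously
          // Per-path progress can no longer be polled via
          // checkStoragePolicySatisfyPathStatus after this change.
        }
      }
    }
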
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index b6f9bdd..adbb133 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,7 +123,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3110,10 +3109,6 @@
     }
   }
 
-  public boolean isInternalSatisfierRunning() throws IOException {
-    return namenode.isInternalSatisfierRunning();
-  }
-
   Tracer getTracer() {
     return tracer;
   }
@@ -3170,25 +3165,4 @@
     checkOpen();
     return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
-
-  /**
-   * Check the storage policy satisfy status of the path for which
-   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
-   *
-   * @return Storage policy satisfy status.
-   *         <ul>
-   *         <li>PENDING if path is in queue and not processed for satisfying
-   *         the policy.</li>
-   *         <li>IN_PROGRESS if satisfying the storage policy for path.</li>
-   *         <li>SUCCESS if storage policy satisfied for the path.</li>
-   *         <li>NOT_AVAILABLE if
-   *         {@link DFSClient#satisfyStoragePolicy(String)} not called for
-   *         path or SPS work is already finished.</li>
-   *         </ul>
-   * @throws IOException
-   */
-  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException {
-    return namenode.checkStoragePolicySatisfyPathStatus(path);
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 5c51c22..e8c881b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,7 +45,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -1757,32 +1756,4 @@
    */
   @AtMostOnce
   void satisfyStoragePolicy(String path) throws IOException;
-
-  /**
-   * Check if internal StoragePolicySatisfier is running.
-   * @return true if internal StoragePolicySatisfier is running
-   * @throws IOException
-   */
-  @Idempotent
-  boolean isInternalSatisfierRunning() throws IOException;
-
-  /**
-   * Check the storage policy satisfy status of the path for which
-   * {@link ClientProtocol#satisfyStoragePolicy(String)} is called.
-   *
-   * @return Storage policy satisfy status.
-   *         <ul>
-   *         <li>PENDING if path is in queue and not processed for satisfying
-   *         the policy.</li>
-   *         <li>IN_PROGRESS if satisfying the storage policy for path.</li>
-   *         <li>SUCCESS if storage policy satisfied for the path.</li>
-   *         <li>NOT_AVAILABLE if
-   *         {@link ClientProtocol#satisfyStoragePolicy(String)} not called for
-   *         path or SPS work is already finished.</li>
-   *         </ul>
-   * @throws IOException
-   */
-  @Idempotent
-  StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index ab48dcd..6de186a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -134,12 +134,6 @@
   public enum StoragePolicySatisfierMode {
 
     /**
-     * This mode represents that SPS service is running inside Namenode and can
-     * accept any SPS call request.
-     */
-    INTERNAL,
-
-    /**
      * This mode represents that SPS service is running outside Namenode as an
      * external service and can accept any SPS call request.
      */
@@ -166,40 +160,6 @@
     }
   }
 
-
-  /**
-   * Storage policy satisfy path status.
-   */
-  public enum StoragePolicySatisfyPathStatus {
-    /**
-     * Scheduled but not yet processed. This will come only in case of
-     * directory. Directory will be added first in "pendingWorkForDirectory"
-     * queue and then later it is processed recursively.
-     */
-    PENDING,
-
-    /**
-     * Satisfying the storage policy for path.
-     */
-    IN_PROGRESS,
-
-    /**
-     * Storage policy satisfied for the path.
-     */
-    SUCCESS,
-
-    /**
-     * Few blocks failed to move and the path is still not
-     * fully satisfied the storage policy.
-     */
-    FAILURE,
-
-    /**
-     * Status not available.
-     */
-    NOT_AVAILABLE
-  }
-
   public enum RollingUpgradeAction {
     QUERY, PREPARE, FINALIZE;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 683ccca..e4bca51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -70,7 +70,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -101,8 +100,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@@ -150,8 +147,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsInternalSatisfierRunningRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsInternalSatisfierRunningResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -301,9 +296,6 @@
   private final static GetErasureCodingCodecsRequestProto
       VOID_GET_EC_CODEC_REQUEST = GetErasureCodingCodecsRequestProto
       .newBuilder().build();
-  private final static IsInternalSatisfierRunningRequestProto
-      VOID_IS_SPS_RUNNING_REQUEST = IsInternalSatisfierRunningRequestProto
-      .newBuilder().build();
 
 
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
@@ -1912,18 +1904,6 @@
   }
 
   @Override
-  public boolean isInternalSatisfierRunning() throws IOException {
-    try {
-      IsInternalSatisfierRunningResponseProto rep =
-          rpcProxy.isInternalSatisfierRunning(null,
-              VOID_IS_SPS_RUNNING_REQUEST);
-      return rep.getRunning();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
   public QuotaUsage getQuotaUsage(String path) throws IOException {
     GetQuotaUsageRequestProto req =
         GetQuotaUsageRequestProto.newBuilder().setPath(path).build();
@@ -1977,20 +1957,4 @@
       throw ProtobufHelper.getRemoteException(e);
     }
   }
-
-  @Override
-  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException {
-    try {
-      CheckStoragePolicySatisfyPathStatusRequestProto request =
-          CheckStoragePolicySatisfyPathStatusRequestProto.newBuilder()
-          .setSrc(path)
-          .build();
-      CheckStoragePolicySatisfyPathStatusResponseProto response = rpcProxy
-          .checkStoragePolicySatisfyPathStatus(null, request);
-      return PBHelperClient.convert(response.getStatus());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 7770e31..4a5a493 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -130,7 +130,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
@@ -3399,40 +3398,4 @@
     }
     return typeProtos;
   }
-
-  public static StoragePolicySatisfyPathStatus convert(
-      HdfsConstants.StoragePolicySatisfyPathStatus status) {
-    switch (status) {
-    case PENDING:
-      return StoragePolicySatisfyPathStatus.PENDING;
-    case IN_PROGRESS:
-      return StoragePolicySatisfyPathStatus.IN_PROGRESS;
-    case SUCCESS:
-      return StoragePolicySatisfyPathStatus.SUCCESS;
-    case FAILURE:
-      return StoragePolicySatisfyPathStatus.FAILURE;
-    case NOT_AVAILABLE:
-      return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
-    default:
-      throw new IllegalArgumentException("Unexpected SPSStatus :" + status);
-    }
-  }
-
-  public static HdfsConstants.StoragePolicySatisfyPathStatus convert(
-      StoragePolicySatisfyPathStatus status) {
-    switch (status) {
-    case PENDING:
-      return HdfsConstants.StoragePolicySatisfyPathStatus.PENDING;
-    case IN_PROGRESS:
-      return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS;
-    case SUCCESS:
-      return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS;
-    case FAILURE:
-      return HdfsConstants.StoragePolicySatisfyPathStatus.FAILURE;
-    case NOT_AVAILABLE:
-      return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
-    default:
-      throw new IllegalArgumentException("Unexpected SPSStatus :" + status);
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index e8e3a58..49ea3f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -838,28 +838,6 @@
 
 }
 
-message IsInternalSatisfierRunningRequestProto {  // no parameters
-}
-
-message IsInternalSatisfierRunningResponseProto {
-  required bool running = 1;
-}
-
-message CheckStoragePolicySatisfyPathStatusRequestProto {  // no parameters
-  required string src = 1;
-}
-
-message CheckStoragePolicySatisfyPathStatusResponseProto {
-  enum StoragePolicySatisfyPathStatus {
-    PENDING = 0;
-    IN_PROGRESS = 1;
-    SUCCESS = 2;
-    FAILURE = 3;
-    NOT_AVAILABLE = 4;
-  }
-  required StoragePolicySatisfyPathStatus status = 1;
-}
-
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -1048,8 +1026,4 @@
       returns(ListOpenFilesResponseProto);
   rpc satisfyStoragePolicy(SatisfyStoragePolicyRequestProto)
       returns(SatisfyStoragePolicyResponseProto);
-  rpc isInternalSatisfierRunning(IsInternalSatisfierRunningRequestProto)
-      returns(IsInternalSatisfierRunningResponseProto);
-  rpc checkStoragePolicySatisfyPathStatus(CheckStoragePolicySatisfyPathStatusRequestProto)
-      returns(CheckStoragePolicySatisfyPathStatusResponseProto);
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 36645c9..29f32a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -87,7 +87,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -2498,19 +2497,6 @@
   }
 
   @Override
-  public boolean isInternalSatisfierRunning() throws IOException {
-    checkOperation(OperationCategory.READ, false);
-    return false;
-  }
-
-  @Override
-  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException {
-    checkOperation(OperationCategory.READ, false);
-    return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
-  }
-
-  @Override
   public Long getNextSPSPath() throws IOException {
     checkOperation(OperationCategory.READ, false);
     // not supported
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cf383d0..5ed35b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -639,10 +639,6 @@
       "dfs.storage.policy.satisfier.retry.max.attempts";
   public static final int DFS_STORAGE_POLICY_SATISFIER_MAX_RETRY_ATTEMPTS_DEFAULT =
       3;
-  public static final String DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_KEY =
-      "dfs.storage.policy.satisfier.low.max-streams.preference";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_DEFAULT =
-      true;
   public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
       "dfs.storage.policy.satisfier.max.outstanding.paths";
   public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 10000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index b0816cb..e51529e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -48,7 +48,6 @@
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -86,8 +85,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckStoragePolicySatisfyPathStatusResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -162,8 +159,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsInternalSatisfierRunningRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsInternalSatisfierRunningResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@@ -1865,22 +1860,6 @@
   }
 
   @Override
-  public IsInternalSatisfierRunningResponseProto
-      isInternalSatisfierRunning(RpcController controller,
-      IsInternalSatisfierRunningRequestProto req)
-      throws ServiceException {
-    try {
-      boolean ret = server.isInternalSatisfierRunning();
-      IsInternalSatisfierRunningResponseProto.Builder builder =
-          IsInternalSatisfierRunningResponseProto.newBuilder();
-      builder.setRunning(ret);
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
   public GetQuotaUsageResponseProto getQuotaUsage(
       RpcController controller, GetQuotaUsageRequestProto req)
       throws ServiceException {
@@ -1925,22 +1904,4 @@
     }
     return VOID_SATISFYSTORAGEPOLICY_RESPONSE;
   }
-
-  @Override
-  public CheckStoragePolicySatisfyPathStatusResponseProto
-      checkStoragePolicySatisfyPathStatus(RpcController controller,
-      CheckStoragePolicySatisfyPathStatusRequestProto request)
-      throws ServiceException {
-    try {
-      StoragePolicySatisfyPathStatus status = server
-          .checkStoragePolicySatisfyPathStatus(request.getSrc());
-      CheckStoragePolicySatisfyPathStatusResponseProto.Builder builder =
-          CheckStoragePolicySatisfyPathStatusResponseProto
-          .newBuilder();
-      builder.setStatus(PBHelperClient.convert(status));
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index f51f839..ac01348 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -42,11 +42,9 @@
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockMovingInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DropSPSWorkCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
@@ -56,11 +54,9 @@
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockStorageMovementCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
@@ -102,8 +98,6 @@
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
@@ -111,7 +105,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DropSPSWorkCommand;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -143,10 +136,6 @@
   private static final RegisterCommandProto REG_CMD_PROTO = 
       RegisterCommandProto.newBuilder().build();
   private static final RegisterCommand REG_CMD = new RegisterCommand();
-  private static final DropSPSWorkCommandProto DROP_SPS_WORK_CMD_PROTO =
-      DropSPSWorkCommandProto.newBuilder().build();
-  private static final DropSPSWorkCommand DROP_SPS_WORK_CMD =
-      new DropSPSWorkCommand();
 
   private PBHelper() {
     /** Hidden constructor */
@@ -480,10 +469,6 @@
       return PBHelper.convert(proto.getBlkIdCmd());
     case BlockECReconstructionCommand:
       return PBHelper.convert(proto.getBlkECReconstructionCmd());
-    case BlockStorageMovementCommand:
-      return PBHelper.convert(proto.getBlkStorageMovementCmd());
-    case DropSPSWorkCommand:
-      return DROP_SPS_WORK_CMD;
     default:
       return null;
     }
@@ -618,15 +603,6 @@
           .setBlkECReconstructionCmd(
               convert((BlockECReconstructionCommand) datanodeCommand));
       break;
-    case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
-      builder.setCmdType(DatanodeCommandProto.Type.BlockStorageMovementCommand)
-          .setBlkStorageMovementCmd(
-              convert((BlockStorageMovementCommand) datanodeCommand));
-      break;
-    case DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND:
-      builder.setCmdType(DatanodeCommandProto.Type.DropSPSWorkCommand)
-          .setDropSPSWorkCmd(DROP_SPS_WORK_CMD_PROTO);
-      break;
     case DatanodeProtocol.DNA_UNKNOWN: //Not expected
     default:
       builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -1148,79 +1124,4 @@
 
     return new FileRegion(block, providedStorageLocation);
   }
-
-  private static BlockStorageMovementCommandProto convert(
-      BlockStorageMovementCommand blkStorageMovementCmd) {
-    BlockStorageMovementCommandProto.Builder builder =
-        BlockStorageMovementCommandProto.newBuilder();
-
-    builder.setBlockPoolId(blkStorageMovementCmd.getBlockPoolId());
-    Collection<BlockMovingInfo> blockMovingInfos = blkStorageMovementCmd
-        .getBlockMovingTasks();
-    for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
-      builder.addBlockMovingInfo(convertBlockMovingInfo(blkMovingInfo));
-    }
-    return builder.build();
-  }
-
-  private static BlockMovingInfoProto convertBlockMovingInfo(
-      BlockMovingInfo blkMovingInfo) {
-    BlockMovingInfoProto.Builder builder = BlockMovingInfoProto
-        .newBuilder();
-    builder.setBlock(PBHelperClient.convert(blkMovingInfo.getBlock()));
-
-    DatanodeInfo sourceDnInfo = blkMovingInfo.getSource();
-    builder.setSourceDnInfo(PBHelperClient.convert(sourceDnInfo));
-
-    DatanodeInfo targetDnInfo = blkMovingInfo.getTarget();
-    builder.setTargetDnInfo(PBHelperClient.convert(targetDnInfo));
-
-    StorageType sourceStorageType = blkMovingInfo.getSourceStorageType();
-    builder.setSourceStorageType(
-        PBHelperClient.convertStorageType(sourceStorageType));
-
-    StorageType targetStorageType = blkMovingInfo.getTargetStorageType();
-    builder.setTargetStorageType(
-        PBHelperClient.convertStorageType(targetStorageType));
-
-    return builder.build();
-  }
-
-  private static DatanodeCommand convert(
-      BlockStorageMovementCommandProto blkStorageMovementCmdProto) {
-    Collection<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
-    List<BlockMovingInfoProto> blkSPSatisfyList =
-        blkStorageMovementCmdProto.getBlockMovingInfoList();
-    for (BlockMovingInfoProto blkSPSatisfy : blkSPSatisfyList) {
-      blockMovingInfos.add(convertBlockMovingInfo(blkSPSatisfy));
-    }
-    return new BlockStorageMovementCommand(
-        DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT,
-        blkStorageMovementCmdProto.getBlockPoolId(), blockMovingInfos);
-  }
-
-  private static BlockMovingInfo convertBlockMovingInfo(
-      BlockMovingInfoProto blockStorageMovingInfoProto) {
-    BlockProto blockProto = blockStorageMovingInfoProto.getBlock();
-    Block block = PBHelperClient.convert(blockProto);
-
-    DatanodeInfoProto sourceDnInfoProto = blockStorageMovingInfoProto
-        .getSourceDnInfo();
-    DatanodeInfo sourceDnInfo = PBHelperClient.convert(sourceDnInfoProto);
-
-    DatanodeInfoProto targetDnInfoProto = blockStorageMovingInfoProto
-        .getTargetDnInfo();
-    DatanodeInfo targetDnInfo = PBHelperClient.convert(targetDnInfoProto);
-    StorageTypeProto srcStorageTypeProto = blockStorageMovingInfoProto
-        .getSourceStorageType();
-    StorageType srcStorageType = PBHelperClient
-        .convertStorageType(srcStorageTypeProto);
-
-    StorageTypeProto targetStorageTypeProto = blockStorageMovingInfoProto
-        .getTargetStorageType();
-    StorageType targetStorageType = PBHelperClient
-        .convertStorageType(targetStorageTypeProto);
-    return new BlockMovingInfo(block, sourceDnInfo, targetDnInfo,
-        srcStorageType, targetStorageType);
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
index 5cf4204..6bf2986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
@@ -30,8 +30,7 @@
   IO_EXCEPTION(-4),
   ILLEGAL_ARGUMENTS(-5),
   INTERRUPTED(-6),
-  UNFINALIZED_UPGRADE(-7),
-  SKIPPED_DUE_TO_SPS(-8);
+  UNFINALIZED_UPGRADE(-7);
 
   private final int code;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bb63f2a..87bd155 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -93,7 +93,6 @@
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -719,9 +718,6 @@
     datanodeManager.close();
     pendingReconstruction.stop();
     blocksMap.close();
-    if (getSPSManager() != null) {
-      getSPSManager().stopGracefully();
-    }
   }
 
   /** @return the datanodeManager */
@@ -3889,21 +3885,6 @@
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);
-
-    // notify SPS about the reported block
-    notifyStorageMovementAttemptFinishedBlk(storageInfo, block);
-  }
-
-  private void notifyStorageMovementAttemptFinishedBlk(
-      DatanodeStorageInfo storageInfo, Block block) {
-    if (getSPSManager() != null) {
-      SPSService sps = getSPSManager().getInternalSPSService();
-      if (sps.isRunning()) {
-        sps.notifyStorageMovementAttemptFinishedBlk(
-            storageInfo.getDatanodeDescriptor(), storageInfo.getStorageType(),
-            block);
-      }
-    }
   }
   
   private void processAndHandleReportedBlock(
@@ -5088,7 +5069,7 @@
       LOG.info("Storage policy satisfier is disabled");
       return false;
     }
-    spsManager = new StoragePolicySatisfyManager(conf, namesystem, this);
+    spsManager = new StoragePolicySatisfyManager(conf, namesystem);
     return true;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 24b948c..9c96f16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -207,14 +206,6 @@
   private final LightWeightHashSet<Block> invalidateBlocks =
       new LightWeightHashSet<>();
 
-  /**
-   * A queue of blocks corresponding to trackID for moving its storage
-   * placements by this datanode.
-   */
-  private final BlockQueue<BlockMovingInfo> storageMovementBlocks =
-      new BlockQueue<>();
-  private volatile boolean dropSPSWork = false;
-
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
    * in case of errors (e.g. datanode does not report if an error occurs
@@ -369,7 +360,6 @@
     this.pendingCached.clear();
     this.cached.clear();
     this.pendingUncached.clear();
-    this.storageMovementBlocks.clear();
   }
 
   public int numBlocks() {
@@ -1075,62 +1065,4 @@
     }
     return false;
   }
-
-  /**
-   * Add the block infos which needs to move its storage locations.
-   *
-   * @param blkMovingInfo
-   *          - storage mismatched block info
-   */
-  public void addBlocksToMoveStorage(BlockMovingInfo blkMovingInfo) {
-    storageMovementBlocks.offer(blkMovingInfo);
-    BlockManager.LOG
-        .debug("Adding block move task " + blkMovingInfo + " to " + getName()
-            + ", current queue size is " + storageMovementBlocks.size());
-  }
-
-  /**
-   * Return the number of blocks queued up for movement.
-   */
-  public int getNumberOfBlocksToMoveStorages() {
-    return storageMovementBlocks.size();
-  }
-
-  /**
-   * Get the blocks to move to satisfy the storage media type.
-   *
-   * @param numBlocksToMoveTasks
-   *          total number of blocks which will be send to this datanode for
-   *          block movement.
-   *
-   * @return block infos which needs to move its storage locations or null if
-   *         there is no block infos to move.
-   */
-  public BlockMovingInfo[] getBlocksToMoveStorages(int numBlocksToMoveTasks) {
-    List<BlockMovingInfo> blockMovingInfos = storageMovementBlocks
-        .poll(numBlocksToMoveTasks);
-    if (blockMovingInfos == null || blockMovingInfos.size() <= 0) {
-      return null;
-    }
-    BlockMovingInfo[] blkMoveArray = new BlockMovingInfo[blockMovingInfos
-        .size()];
-    return blockMovingInfos.toArray(blkMoveArray);
-  }
-
-  /**
-   * Set whether to drop SPS related queues at DN side.
-   *
-   * @param dropSPSWork
-   *          - true if need to drop SPS queues, otherwise false.
-   */
-  public synchronized void setDropSPSWork(boolean dropSPSWork) {
-    this.dropSPSWork = dropSPSWork;
-  }
-
-  /**
-   * @return true if need to drop SPS queues at DN.
-   */
-  public synchronized boolean shouldDropSPSWork() {
-    return this.dropSPSWork;
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 3542864..4173f48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -210,8 +209,6 @@
    */
   private final long timeBetweenResendingCachingDirectivesMs;
 
-  private final boolean blocksToMoveLowPriority;
-
   DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
       final Configuration conf) throws IOException {
     this.namesystem = namesystem;
@@ -336,12 +333,6 @@
     this.blocksPerPostponedMisreplicatedBlocksRescan = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY,
         DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY_DEFAULT);
-
-    // SPS configuration to decide blocks to move can share equal ratio of
-    // maxtransfers with pending replica and erasure-coded reconstruction tasks
-    blocksToMoveLowPriority = conf.getBoolean(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_KEY,
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_DEFAULT);
   }
 
   private static long getStaleIntervalFromConf(Configuration conf,
@@ -1101,19 +1092,6 @@
           nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion());
           nodeS.setDisallowed(false); // Node is in the include list
 
-          // Sets dropSPSWork flag to true, to ensure that
-          // DNA_DROP_SPS_WORK_COMMAND will send to datanode via next heartbeat
-          // response immediately after the node registration. This is
-          // to avoid a situation, where multiple block attempt finished
-          // responses coming from different datanodes. After SPS monitor time
-          // out, it will retry the files which were scheduled to the
-          // disconnected(for long time more than heartbeat expiry) DN, by
-          // finding new datanode. Now, if the expired datanode reconnects back
-          // after SPS reschedules, it leads to get different movement attempt
-          // finished report from reconnected and newly datanode which is
-          // attempting the block movement.
-          nodeS.setDropSPSWork(true);
-
           // resolve network location
           if(this.rejectUnresolvedTopologyDN) {
             nodeS.setNetworkLocation(resolveNetworkLocation(nodeS));
@@ -1691,47 +1669,18 @@
     final List<DatanodeCommand> cmds = new ArrayList<>();
     // Allocate _approximately_ maxTransfers pending tasks to DataNode.
     // NN chooses pending tasks based on the ratio between the lengths of
-    // replication, erasure-coded block queues and block storage movement
-    // queues.
+    // replication and erasure-coded block queues.
     int totalReplicateBlocks = nodeinfo.getNumberOfReplicateBlocks();
     int totalECBlocks = nodeinfo.getNumberOfBlocksToBeErasureCoded();
-    int totalBlocksToMove = nodeinfo.getNumberOfBlocksToMoveStorages();
     int totalBlocks = totalReplicateBlocks + totalECBlocks;
-    if (totalBlocks > 0 || totalBlocksToMove > 0) {
-      int numReplicationTasks = 0;
-      int numECTasks = 0;
-      int numBlocksToMoveTasks = 0;
-      // Check blocksToMoveLowPriority configuration is true/false. If false,
-      // then equally sharing the max transfer. Otherwise gives high priority to
-      // the pending_replica/erasure-coded tasks and only the delta streams will
-      // be used for blocks to move tasks.
-      if (!blocksToMoveLowPriority) {
-        // add blocksToMove count to total blocks so that will get equal share
-        totalBlocks = totalBlocks + totalBlocksToMove;
-        numReplicationTasks = (int) Math
-            .ceil((double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
-        numECTasks = (int) Math
-            .ceil((double) (totalECBlocks * maxTransfers) / totalBlocks);
-        numBlocksToMoveTasks = (int) Math
-            .ceil((double) (totalBlocksToMove * maxTransfers) / totalBlocks);
-      } else {
-        // Calculate the replica and ec tasks, then pick blocksToMove if there
-        // is any streams available.
-        numReplicationTasks = (int) Math
-            .ceil((double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
-        numECTasks = (int) Math
-            .ceil((double) (totalECBlocks * maxTransfers) / totalBlocks);
-        int numTasks = numReplicationTasks + numECTasks;
-        if (numTasks < maxTransfers) {
-          int remainingMaxTransfers = maxTransfers - numTasks;
-          numBlocksToMoveTasks = Math.min(totalBlocksToMove,
-              remainingMaxTransfers);
-        }
-      }
+    if (totalBlocks > 0) {
+      int numReplicationTasks = (int) Math.ceil(
+          (double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
+      int numECTasks = (int) Math.ceil(
+          (double) (totalECBlocks * maxTransfers) / totalBlocks);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Pending replication tasks: " + numReplicationTasks
-            + " erasure-coded tasks: " + numECTasks + " blocks to move tasks: "
-            + numBlocksToMoveTasks);
+            + " erasure-coded tasks: " + numECTasks);
       }
       // check pending replication tasks
       List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
@@ -1747,23 +1696,6 @@
         cmds.add(new BlockECReconstructionCommand(
             DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
       }
-      // check pending block storage movement tasks
-      if (nodeinfo.shouldDropSPSWork()) {
-        cmds.add(DropSPSWorkCommand.DNA_DROP_SPS_WORK_COMMAND);
-        // Set back to false to indicate that the new value has been sent to the
-        // datanode.
-        nodeinfo.setDropSPSWork(false);
-      } else {
-        // Get pending block storage movement tasks
-        BlockMovingInfo[] blkStorageMovementInfos = nodeinfo
-            .getBlocksToMoveStorages(numBlocksToMoveTasks);
-
-        if (blkStorageMovementInfos != null) {
-          cmds.add(new BlockStorageMovementCommand(
-              DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT, blockPoolId,
-              Arrays.asList(blkStorageMovementInfos)));
-        }
-      }
     }
 
     // check block invalidation
@@ -2037,18 +1969,6 @@
   }
 
   /**
-   * Mark all DNs to drop SPS queues. A DNA_DROP_SPS_WORK_COMMAND will be added
-   * in heartbeat response, which will indicate DN to drop SPS queues
-   */
-  public void addDropSPSWorkCommandsToAllDNs() {
-    synchronized (this) {
-      for (DatanodeDescriptor dn : datanodeMap.values()) {
-        dn.setDropSPSWork(true);
-      }
-    }
-  }
-
-  /**
    * Generates datanode reports for the given report type.
    *
    * @param type
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 1656b16..a25f6a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -795,16 +795,6 @@
           ((BlockECReconstructionCommand) cmd).getECTasks();
       dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
       break;
-    case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
-      LOG.info("DatanodeCommand action: DNA_BLOCK_STORAGE_MOVEMENT");
-      BlockStorageMovementCommand blkSPSCmd = (BlockStorageMovementCommand) cmd;
-      dn.getStoragePolicySatisfyWorker().processBlockMovingTasks(
-          blkSPSCmd.getBlockPoolId(), blkSPSCmd.getBlockMovingTasks());
-      break;
-    case DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND:
-      LOG.info("DatanodeCommand action: DNA_DROP_SPS_WORK_COMMAND");
-      dn.getStoragePolicySatisfyWorker().dropSPSWork();
-      break;
     default:
       LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
     }
@@ -835,8 +825,6 @@
     case DatanodeProtocol.DNA_CACHE:
     case DatanodeProtocol.DNA_UNCACHE:
     case DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION:
-    case DatanodeProtocol.DNA_BLOCK_STORAGE_MOVEMENT:
-    case DatanodeProtocol.DNA_DROP_SPS_WORK_COMMAND:
       LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction());
       break;
     default:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 21af33f..aa044f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -386,7 +386,6 @@
   private String dnUserName = null;
   private BlockRecoveryWorker blockRecoveryWorker;
   private ErasureCodingWorker ecWorker;
-  private StoragePolicySatisfyWorker storagePolicySatisfyWorker;
   private final Tracer tracer;
   private final TracerConfigurationManager tracerConfigurationManager;
   private static final int NUM_CORES = Runtime.getRuntime()
@@ -1426,9 +1425,6 @@
 
     ecWorker = new ErasureCodingWorker(getConf(), this);
     blockRecoveryWorker = new BlockRecoveryWorker(this);
-    storagePolicySatisfyWorker =
-        new StoragePolicySatisfyWorker(getConf(), this, null);
-    storagePolicySatisfyWorker.start();
 
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(getConf());
@@ -1981,10 +1977,6 @@
       }
     }
 
-    // stop storagePolicySatisfyWorker
-    if (storagePolicySatisfyWorker != null) {
-      storagePolicySatisfyWorker.stop();
-    }
     List<BPOfferService> bposArray = (this.blockPoolManager == null)
         ? new ArrayList<BPOfferService>()
         : this.blockPoolManager.getAllNamenodeThreads();
@@ -3624,8 +3616,4 @@
     }
     return this.diskBalancer;
   }
-
-  StoragePolicySatisfyWorker getStoragePolicySatisfyWorker() {
-    return storagePolicySatisfyWorker;
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
deleted file mode 100644
index 0157205..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
-import org.apache.hadoop.hdfs.server.common.sps.BlockMovementAttemptFinished;
-import org.apache.hadoop.hdfs.server.common.sps.BlockMovementStatus;
-import org.apache.hadoop.hdfs.server.common.sps.BlockStorageMovementTracker;
-import org.apache.hadoop.hdfs.server.common.sps.BlocksMovementsStatusHandler;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Daemon;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * StoragePolicySatisfyWorker handles the storage policy satisfier commands.
- * These commands would be issued from NameNode as part of Datanode's heart beat
- * response. BPOfferService delegates the work to this class for handling
- * BlockStorageMovement commands.
- */
-@InterfaceAudience.Private
-public class StoragePolicySatisfyWorker {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(StoragePolicySatisfyWorker.class);
-
-  private final DataNode datanode;
-
-  private final int moverThreads;
-  private final ExecutorService moveExecutor;
-  private final CompletionService<BlockMovementAttemptFinished>
-      moverCompletionService;
-  private final BlockStorageMovementTracker movementTracker;
-  private Daemon movementTrackerThread;
-  private final BlockDispatcher blkDispatcher;
-
-  public StoragePolicySatisfyWorker(Configuration conf, DataNode datanode,
-      BlocksMovementsStatusHandler handler) {
-    this.datanode = datanode;
-    // Defaulting to 10. This is to minimize the number of move ops.
-    moverThreads = conf.getInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, 10);
-    moveExecutor = initializeBlockMoverThreadPool(moverThreads);
-    moverCompletionService = new ExecutorCompletionService<>(moveExecutor);
-    movementTracker = new BlockStorageMovementTracker(moverCompletionService,
-        handler);
-    movementTrackerThread = new Daemon(movementTracker);
-    movementTrackerThread.setName("BlockStorageMovementTracker");
-    DNConf dnConf = datanode.getDnConf();
-    int ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
-    blkDispatcher = new BlockDispatcher(dnConf.getSocketTimeout(),
-        ioFileBufferSize, dnConf.getConnectToDnViaHostname());
-  }
-
-  /**
-   * Start StoragePolicySatisfyWorker, which will start block movement tracker
-   * thread to track the completion of block movements.
-   */
-  void start() {
-    movementTrackerThread.start();
-  }
-
-  /**
-   * Stop StoragePolicySatisfyWorker, which will terminate executor service and
-   * stop block movement tracker thread.
-   */
-  void stop() {
-    movementTracker.stopTracking();
-    movementTrackerThread.interrupt();
-    moveExecutor.shutdown();
-    try {
-      moveExecutor.awaitTermination(500, TimeUnit.MILLISECONDS);
-    } catch (InterruptedException e) {
-      LOG.error("Interrupted while waiting for mover thread to terminate", e);
-    }
-  }
-
-  private ThreadPoolExecutor initializeBlockMoverThreadPool(int num) {
-    LOG.debug("Block mover to satisfy storage policy; pool threads={}", num);
-
-    ThreadPoolExecutor moverThreadPool = new ThreadPoolExecutor(1, num, 60,
-        TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
-        new Daemon.DaemonFactory() {
-          private final AtomicInteger threadIndex = new AtomicInteger(0);
-
-          @Override
-          public Thread newThread(Runnable r) {
-            Thread t = super.newThread(r);
-            t.setName("BlockMoverTask-" + threadIndex.getAndIncrement());
-            return t;
-          }
-        });
-
-    moverThreadPool.allowCoreThreadTimeOut(true);
-    return moverThreadPool;
-  }
-
-  /**
-   * Handles the given set of block movement tasks. This will iterate over the
-   * block movement list and submit each block movement task asynchronously in a
-   * separate thread. Each task will move the block replica to the target node &
-   * wait for the completion.
-   *
-   * @param blockPoolID block pool identifier
-   *
-   * @param blockMovingInfos
-   *          list of blocks to be moved
-   */
-  public void processBlockMovingTasks(final String blockPoolID,
-      final Collection<BlockMovingInfo> blockMovingInfos) {
-    LOG.debug("Received BlockMovingTasks {}", blockMovingInfos);
-    for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
-      StorageType sourceStorageType = blkMovingInfo.getSourceStorageType();
-      StorageType targetStorageType = blkMovingInfo.getTargetStorageType();
-      assert sourceStorageType != targetStorageType
-          : "Source and Target storage type shouldn't be same!";
-      BlockMovingTask blockMovingTask = new BlockMovingTask(blockPoolID,
-          blkMovingInfo);
-      moverCompletionService.submit(blockMovingTask);
-    }
-  }
-
-  /**
-   * This class encapsulates the process of moving the block replica to the
-   * given target and wait for the response.
-   */
-  private class BlockMovingTask implements
-      Callable<BlockMovementAttemptFinished> {
-    private final String blockPoolID;
-    private final BlockMovingInfo blkMovingInfo;
-
-    BlockMovingTask(String blockPoolID, BlockMovingInfo blkMovInfo) {
-      this.blockPoolID = blockPoolID;
-      this.blkMovingInfo = blkMovInfo;
-    }
-
-    @Override
-    public BlockMovementAttemptFinished call() {
-      BlockMovementStatus status = moveBlock();
-      return new BlockMovementAttemptFinished(blkMovingInfo.getBlock(),
-          blkMovingInfo.getSource(), blkMovingInfo.getTarget(),
-          blkMovingInfo.getTargetStorageType(), status);
-    }
-
-    private BlockMovementStatus moveBlock() {
-      datanode.incrementXmitsInProgress();
-      ExtendedBlock eb = new ExtendedBlock(blockPoolID,
-          blkMovingInfo.getBlock());
-      try {
-        Token<BlockTokenIdentifier> accessToken = datanode.getBlockAccessToken(
-            eb, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE),
-            new StorageType[]{blkMovingInfo.getTargetStorageType()},
-            new String[0]);
-        DataEncryptionKeyFactory keyFactory = datanode
-            .getDataEncryptionKeyFactoryForBlock(eb);
-
-        return blkDispatcher.moveBlock(blkMovingInfo,
-            datanode.getSaslClient(), eb, datanode.newSocket(),
-            keyFactory, accessToken);
-      } catch (IOException e) {
-        // TODO: handle failure retries
-        LOG.warn(
-            "Failed to move block:{} from src:{} to destin:{} to satisfy "
-                + "storageType:{}",
-            blkMovingInfo.getBlock(), blkMovingInfo.getSource(),
-            blkMovingInfo.getTarget(), blkMovingInfo.getTargetStorageType(), e);
-        return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE;
-      } finally {
-        datanode.decrementXmitsInProgress();
-      }
-    }
-  }
-
-  /**
-   * Drop the in-progress SPS work queues.
-   */
-  public void dropSPSWork() {
-    LOG.info("Received request to drop StoragePolicySatisfierWorker queues. "
-        + "So, none of the SPS Worker queued block movements will"
-        + " be scheduled.");
-  }
-}
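For context on what the deleted datanode worker did: it paired a bounded ThreadPoolExecutor of named daemon threads (idle threads time out) with a completion service, so each block move ran asynchronously and results could be drained as they finished. A minimal, self-contained sketch of that pattern follows; the class name, pool size and the Callable body are illustrative stand-ins, not the HDFS implementation.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class BlockMoverPoolSketch {
  public static void main(String[] args) throws Exception {
    AtomicInteger threadIndex = new AtomicInteger(0);
    // Bounded pool of named daemon threads; idle threads time out, as in the
    // removed initializeBlockMoverThreadPool().
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 4, 60, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>(), r -> {
          Thread t = new Thread(r,
              "BlockMoverTask-" + threadIndex.getAndIncrement());
          t.setDaemon(true);
          return t;
        });
    pool.allowCoreThreadTimeOut(true);

    // Completion service lets the caller collect results in finish order.
    CompletionService<String> completion = new ExecutorCompletionService<>(pool);
    List<String> blocks = Arrays.asList("blk_1", "blk_2", "blk_3");
    for (String blk : blocks) {
      Callable<String> moveTask = () -> blk + " moved"; // stand-in for a real move
      completion.submit(moveTask);
    }
    for (int i = 0; i < blocks.size(); i++) {
      System.out.println(completion.take().get()); // blocks until the next task ends
    }
    pool.shutdown();
  }
}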
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index af5ab2d..c7a53e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -48,8 +48,6 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.SecurityUtil;
@@ -658,25 +656,6 @@
           final Mover m = new Mover(nnc, conf, retryCount,
               excludedPinnedBlocks);
 
-          boolean spsRunning;
-          try {
-            spsRunning = nnc.getDistributedFileSystem().getClient()
-                .isInternalSatisfierRunning();
-          } catch (RemoteException e) {
-            IOException cause = e.unwrapRemoteException();
-            if (cause instanceof StandbyException) {
-              System.err.println("Skip Standby Namenode. " + nnc.toString());
-              continue;
-            }
-            throw e;
-          }
-          if (spsRunning) {
-            System.err.println("Mover failed due to StoragePolicySatisfier"
-                + " service running inside namenode. Exiting with status "
-                + ExitStatus.SKIPPED_DUE_TO_SPS + "... ");
-            return ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode();
-          }
-
           final ExitStatus r = m.run();
 
           if (r == ExitStatus.SUCCESS) {
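The removed pre-check above also showed the usual Hadoop IPC idiom for tolerating a standby NameNode: unwrap the RemoteException and test for StandbyException before deciding whether to skip or rethrow. A hedged sketch of that idiom in isolation; the RpcCall interface and method names are hypothetical, only RemoteException#unwrapRemoteException and StandbyException come from hadoop-common.

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

final class StandbyAwareCall {

  /** Illustrative stand-in for any RPC that may be answered by a standby NN. */
  interface RpcCall {
    boolean invoke() throws IOException;
  }

  /**
   * Runs the call; returns false (meaning "skip this namenode") when the
   * remote side reports it is a standby, and rethrows every other failure.
   */
  static boolean invokeSkippingStandby(RpcCall call) throws IOException {
    try {
      return call.invoke();
    } catch (RemoteException e) {
      IOException cause = e.unwrapRemoteException();
      if (cause instanceof StandbyException) {
        return false;
      }
      throw e;
    }
  }
}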
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3b68979..1cb414d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
 import org.apache.hadoop.security.AccessControlException;
 
 import java.io.FileNotFoundException;
@@ -207,17 +206,6 @@
     List<XAttr> newXAttrs = filterINodeXAttrs(existingXAttrs, toRemove,
                                               removedXAttrs);
     if (existingXAttrs.size() != newXAttrs.size()) {
-      for (XAttr xattr : toRemove) {
-        if (XATTR_SATISFY_STORAGE_POLICY
-            .equals(XAttrHelper.getPrefixedName(xattr))) {
-          StoragePolicySatisfyManager spsManager =
-              fsd.getBlockManager().getSPSManager();
-          if (spsManager != null) {
-            spsManager.getInternalSPSService().clearQueue(inode.getId());
-          }
-          break;
-        }
-      }
       XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
       return removedXAttrs;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index afe90923..7bc9ecc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -209,7 +209,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1363,9 +1362,6 @@
         // Don't want to keep replication queues when not in Active.
         blockManager.clearQueues();
         blockManager.setInitializedReplQueues(false);
-        if (blockManager.getSPSManager() != null) {
-          blockManager.getSPSManager().stopGracefully();
-        }
       }
     } finally {
       writeUnlock("stopActiveServices");
@@ -2275,9 +2271,7 @@
     }
     // checks sps status
     boolean disabled = (blockManager.getSPSManager() == null);
-    if (disabled || (blockManager
-        .getSPSManager().getMode() == StoragePolicySatisfierMode.INTERNAL
-        && !blockManager.getSPSManager().isInternalSatisfierRunning())) {
+    if (disabled) {
       throw new UnsupportedActionException(
           "Cannot request to satisfy storage policy "
               + "when storage policy satisfier feature has been disabled"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 2f3325f..318f801 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -111,7 +111,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -2534,41 +2533,6 @@
   }
 
   @Override
-  public boolean isInternalSatisfierRunning() throws IOException {
-    checkNNStartup();
-    String operationName = "isInternalSatisfierRunning";
-    namesystem.checkSuperuserPrivilege(operationName);
-    if (nn.isStandbyState()) {
-      throw new StandbyException("Not supported by Standby Namenode.");
-    }
-    StoragePolicySatisfyManager spsMgr =
-        namesystem.getBlockManager().getSPSManager();
-    boolean isInternalSatisfierRunning = (spsMgr != null
-        ? spsMgr.isInternalSatisfierRunning() : false);
-    namesystem.logAuditEvent(true, operationName, null);
-    return isInternalSatisfierRunning;
-  }
-
-  @Override
-  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException {
-    checkNNStartup();
-    if (nn.isStandbyState()) {
-      throw new StandbyException("Not supported by Standby Namenode.");
-    }
-    if (namesystem.getBlockManager().getSPSManager() == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Satisfier is not running inside namenode, so status "
-            + "can't be returned.");
-      }
-      throw new IOException("Satisfier is not running inside namenode, "
-          + "so status can't be returned.");
-    }
-    return namesystem.getBlockManager().getSPSManager()
-        .checkStoragePolicySatisfyPathStatus(path);
-  }
-
-  @Override
   public Long getNextSPSPath() throws IOException {
     checkNNStartup();
     String operationName = "getNextSPSPath";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index c95dcda..b990bc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -23,14 +23,10 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Queue;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,9 +56,6 @@
   private final Map<Long, DirPendingWorkInfo> pendingWorkForDirectory =
       new HashMap<>();
 
-  private final Map<Long, StoragePolicySatisfyPathStatusInfo> spsStatus =
-      new ConcurrentHashMap<>();
-
   private final Context ctxt;
 
   private Daemon pathIdCollector;
@@ -86,9 +79,6 @@
    *          - track info for satisfy the policy
    */
   public synchronized void add(ItemInfo trackInfo) {
-    spsStatus.put(trackInfo.getFile(),
-        new StoragePolicySatisfyPathStatusInfo(
-            StoragePolicySatisfyPathStatus.IN_PROGRESS));
     storageMovementNeeded.add(trackInfo);
   }
 
@@ -129,7 +119,7 @@
     if (itemInfo.getStartPath() == itemInfo.getFile()) {
       return;
     }
-    updatePendingDirScanStats(itemInfo.getFile(), 1, scanCompleted);
+    updatePendingDirScanStats(itemInfo.getStartPath(), 1, scanCompleted);
   }
 
   private void updatePendingDirScanStats(long startPath, int numScannedFiles,
@@ -181,7 +171,6 @@
       if (!ctxt.isFileExist(startId)) {
         // directory deleted just remove it.
         this.pendingWorkForDirectory.remove(startId);
-        updateStatus(startId, isSuccess);
       } else {
         DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
         if (pendingWork != null) {
@@ -189,17 +178,13 @@
           if (pendingWork.isDirWorkDone()) {
             ctxt.removeSPSHint(startId);
             pendingWorkForDirectory.remove(startId);
-            pendingWork.setFailure(!isSuccess);
-            updateStatus(startId, pendingWork.isPolicySatisfied());
           }
-          pendingWork.setFailure(isSuccess);
         }
       }
     } else {
       // Remove xAttr if trackID doesn't exist in
       // storageMovementAttemptedItems or file policy satisfied.
       ctxt.removeSPSHint(trackInfo.getFile());
-      updateStatus(trackInfo.getFile(), isSuccess);
     }
   }
 
@@ -216,24 +201,6 @@
   }
 
   /**
-   * Mark inode status as SUCCESS in map.
-   */
-  private void updateStatus(long startId, boolean isSuccess){
-    StoragePolicySatisfyPathStatusInfo spsStatusInfo =
-        spsStatus.get(startId);
-    if (spsStatusInfo == null) {
-      spsStatusInfo = new StoragePolicySatisfyPathStatusInfo();
-      spsStatus.put(startId, spsStatusInfo);
-    }
-
-    if (isSuccess) {
-      spsStatusInfo.setSuccess();
-    } else {
-      spsStatusInfo.setFailure();
-    }
-  }
-
-  /**
    * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
    * and notify to clean up required resources.
    * @throws IOException
@@ -277,7 +244,6 @@
     @Override
     public void run() {
       LOG.info("Starting SPSPathIdProcessor!.");
-      long lastStatusCleanTime = 0;
       Long startINode = null;
       while (ctxt.isRunning()) {
         try {
@@ -289,9 +255,6 @@
               // Waiting for SPS path
               Thread.sleep(3000);
             } else {
-              spsStatus.put(startINode,
-                  new StoragePolicySatisfyPathStatusInfo(
-                      StoragePolicySatisfyPathStatus.IN_PROGRESS));
               ctxt.scanAndCollectFiles(startINode);
               // check if directory was empty and no child added to queue
               DirPendingWorkInfo dirPendingWorkInfo =
@@ -300,15 +263,8 @@
                   && dirPendingWorkInfo.isDirWorkDone()) {
                 ctxt.removeSPSHint(startINode);
                 pendingWorkForDirectory.remove(startINode);
-                updateStatus(startINode, true);
               }
             }
-            //Clear the SPS status if status is in SUCCESS more than 5 min.
-            if (Time.monotonicNow()
-                - lastStatusCleanTime > statusClearanceElapsedTimeMs) {
-              lastStatusCleanTime = Time.monotonicNow();
-              cleanSPSStatus();
-            }
             startINode = null; // Current inode successfully scanned.
           }
         } catch (Throwable t) {
@@ -328,16 +284,6 @@
         }
       }
     }
-
-    private synchronized void cleanSPSStatus() {
-      for (Iterator<Entry<Long, StoragePolicySatisfyPathStatusInfo>> it =
-          spsStatus.entrySet().iterator(); it.hasNext();) {
-        Entry<Long, StoragePolicySatisfyPathStatusInfo> entry = it.next();
-        if (entry.getValue().canRemove()) {
-          it.remove();
-        }
-      }
-    }
   }
 
   /**
@@ -347,7 +293,6 @@
 
     private int pendingWorkCount = 0;
     private boolean fullyScanned = false;
-    private boolean success = true;
 
     /**
      * Increment the pending work count for directory.
@@ -378,20 +323,6 @@
     public synchronized void markScanCompleted() {
       this.fullyScanned = true;
     }
-
-    /**
-     * Return true if all the files block movement is success, otherwise false.
-     */
-    public boolean isPolicySatisfied() {
-      return success;
-    }
-
-    /**
-     * Set directory SPS status failed.
-     */
-    public void setFailure(boolean failure) {
-      this.success = this.success || failure;
-    }
   }
 
   public void activate() {
@@ -406,56 +337,6 @@
     }
   }
 
-  /**
-   * Represent the file/directory block movement status.
-   */
-  static class StoragePolicySatisfyPathStatusInfo {
-    private StoragePolicySatisfyPathStatus status =
-        StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
-    private long lastStatusUpdateTime;
-
-    StoragePolicySatisfyPathStatusInfo() {
-      this.lastStatusUpdateTime = 0;
-    }
-
-    StoragePolicySatisfyPathStatusInfo(StoragePolicySatisfyPathStatus status) {
-      this.status = status;
-      this.lastStatusUpdateTime = 0;
-    }
-
-    private void setSuccess() {
-      this.status = StoragePolicySatisfyPathStatus.SUCCESS;
-      this.lastStatusUpdateTime = Time.monotonicNow();
-    }
-
-    private void setFailure() {
-      this.status = StoragePolicySatisfyPathStatus.FAILURE;
-      this.lastStatusUpdateTime = Time.monotonicNow();
-    }
-
-    private StoragePolicySatisfyPathStatus getStatus() {
-      return status;
-    }
-
-    /**
-     * Return true if SUCCESS status cached more then 5 min.
-     */
-    private boolean canRemove() {
-      return (StoragePolicySatisfyPathStatus.SUCCESS == status
-          || StoragePolicySatisfyPathStatus.FAILURE == status)
-          && (Time.monotonicNow()
-              - lastStatusUpdateTime) > statusClearanceElapsedTimeMs;
-    }
-  }
-
-  public StoragePolicySatisfyPathStatus getStatus(long id) {
-    StoragePolicySatisfyPathStatusInfo spsStatusInfo = spsStatus.get(id);
-    if(spsStatusInfo == null){
-      return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
-    }
-    return spsStatusInfo.getStatus();
-  }
-
   @VisibleForTesting
   public static void setStatusClearanceElapsedTimeMs(
       long statusClearanceElapsedTimeMs) {
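The tracking code stripped out of BlockStorageMovementNeeded boiled down to a familiar pattern: a ConcurrentHashMap keyed by path id whose terminal entries (SUCCESS/FAILURE) are evicted once they have been cached longer than a clearance interval. A minimal sketch of that pattern under hypothetical names, not the removed HDFS types:

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Hypothetical status cache with time-based eviction of finished entries. */
final class PathStatusCache {
  enum Status { IN_PROGRESS, SUCCESS, FAILURE }

  private static final class Entry {
    Status status = Status.IN_PROGRESS;
    long finishedAtMs; // 0 until a terminal status is set
  }

  private final Map<Long, Entry> statusById = new ConcurrentHashMap<>();
  private final long clearanceMs;

  PathStatusCache(long clearanceMs) {
    this.clearanceMs = clearanceMs;
  }

  void markInProgress(long id) {
    statusById.put(id, new Entry());
  }

  void markFinished(long id, boolean success) {
    Entry e = statusById.computeIfAbsent(id, k -> new Entry());
    e.status = success ? Status.SUCCESS : Status.FAILURE;
    e.finishedAtMs = System.currentTimeMillis();
  }

  Status get(long id) {
    Entry e = statusById.get(id);
    return e == null ? null : e.status;
  }

  /** Drop terminal entries cached longer than the clearance interval. */
  void evictStale() {
    long now = System.currentTimeMillis();
    for (Iterator<Map.Entry<Long, Entry>> it =
        statusById.entrySet().iterator(); it.hasNext();) {
      Entry e = it.next().getValue();
      if (e.status != Status.IN_PROGRESS && now - e.finishedAtMs > clearanceMs) {
        it.remove();
      }
    }
  }
}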
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
index d538374..afa5a50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
@@ -94,11 +94,6 @@
   BlockStoragePolicy getStoragePolicy(byte policyId);
 
   /**
-   * Drop the SPS work in case if any previous work queued up.
-   */
-  void addDropPreviousSPSWorkAtDNs();
-
-  /**
    * Remove the hint which was added to track SPS call.
    *
    * @param spsPath
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
deleted file mode 100644
index d6e92d2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-
-/**
- * This class handles the internal SPS block movements. This will assign block
- * movement tasks to target datanode descriptors.
- */
-@InterfaceAudience.Private
-public class IntraSPSNameNodeBlockMoveTaskHandler
-    implements BlockMoveTaskHandler {
-
-  private BlockManager blockManager;
-  private Namesystem namesystem;
-
-  public IntraSPSNameNodeBlockMoveTaskHandler(BlockManager blockManager,
-      Namesystem namesytem) {
-    this.blockManager = blockManager;
-    this.namesystem = namesytem;
-  }
-
-  @Override
-  public void submitMoveTask(BlockMovingInfo blkMovingInfo) throws IOException {
-    namesystem.readLock();
-    try {
-      DatanodeDescriptor dn = blockManager.getDatanodeManager()
-          .getDatanode(blkMovingInfo.getTarget().getDatanodeUuid());
-      if (dn == null) {
-        throw new IOException("Failed to schedule block movement task:"
-            + blkMovingInfo + " as target datanode: "
-            + blkMovingInfo.getTarget() + " doesn't exists");
-      }
-      dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
-      dn.addBlocksToMoveStorage(blkMovingInfo);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
deleted file mode 100644
index 2bf4810..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
-import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.DatanodeMap;
-import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessControlException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is the Namenode implementation for analyzing the file blocks which
- * are expecting to change its storages and assigning the block storage
- * movements to satisfy the storage policy.
- */
-@InterfaceAudience.Private
-public class IntraSPSNameNodeContext implements Context {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(IntraSPSNameNodeContext.class);
-
-  private final Namesystem namesystem;
-  private final BlockManager blockManager;
-
-  private SPSService service;
-  private final FileCollector fileCollector;
-  private final BlockMoveTaskHandler blockMoveTaskHandler;
-
-  public IntraSPSNameNodeContext(Namesystem namesystem,
-      BlockManager blockManager, SPSService service) {
-    this.namesystem = namesystem;
-    this.blockManager = blockManager;
-    this.service = service;
-    fileCollector = new IntraSPSNameNodeFileIdCollector(
-        namesystem.getFSDirectory(), service);
-    blockMoveTaskHandler = new IntraSPSNameNodeBlockMoveTaskHandler(
-        blockManager, namesystem);
-  }
-
-  @Override
-  public int getNumLiveDataNodes() {
-    return blockManager.getDatanodeManager().getNumLiveDataNodes();
-  }
-
-  /**
-   * @return object containing information regarding the file.
-   */
-  @Override
-  public HdfsFileStatus getFileInfo(long inodeID) throws IOException {
-    Path filePath = DFSUtilClient.makePathFromFileId(inodeID);
-    return namesystem.getFileInfo(filePath.toString(), true, true);
-  }
-
-  @Override
-  public DatanodeStorageReport[] getLiveDatanodeStorageReport()
-      throws IOException {
-    namesystem.readLock();
-    try {
-      return blockManager.getDatanodeManager()
-          .getDatanodeStorageReport(DatanodeReportType.LIVE);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
-  @Override
-  public boolean isFileExist(long inodeId) {
-    return namesystem.getFSDirectory().getInode(inodeId) != null;
-  }
-
-  @Override
-  public void removeSPSHint(long inodeId) throws IOException {
-    this.namesystem.removeXattr(inodeId, XATTR_SATISFY_STORAGE_POLICY);
-  }
-
-  @Override
-  public boolean isRunning() {
-    return namesystem.isRunning() && service.isRunning();
-  }
-
-  @Override
-  public boolean isInSafeMode() {
-    return namesystem.isInSafeMode();
-  }
-
-  @Override
-  public boolean isMoverRunning() {
-    String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
-    return namesystem.isFileOpenedForWrite(moverId);
-  }
-
-  @Override
-  public void addDropPreviousSPSWorkAtDNs() {
-    namesystem.readLock();
-    try {
-      blockManager.getDatanodeManager().addDropSPSWorkCommandsToAllDNs();
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
-  @Override
-  public BlockStoragePolicy getStoragePolicy(byte policyID) {
-    return blockManager.getStoragePolicy(policyID);
-  }
-
-  @Override
-  public NetworkTopology getNetworkTopology(DatanodeMap datanodeMap) {
-    return blockManager.getDatanodeManager().getNetworkTopology();
-  }
-
-  @Override
-  public long getFileID(String path) throws UnresolvedLinkException,
-      AccessControlException, ParentNotDirectoryException {
-    namesystem.readLock();
-    try {
-      INode inode = namesystem.getFSDirectory().getINode(path);
-      return inode == null ? -1 : inode.getId();
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
-  @Override
-  public Long getNextSPSPath() {
-    return blockManager.getSPSManager().getNextPathId();
-  }
-
-  @Override
-  public void removeSPSPathId(long trackId) {
-    blockManager.getSPSManager().removePathId(trackId);
-  }
-
-  @Override
-  public void removeAllSPSPathIds() {
-    blockManager.getSPSManager().removeAllPathIds();
-  }
-
-  @Override
-  public void scanAndCollectFiles(long filePath)
-      throws IOException, InterruptedException {
-    fileCollector.scanAndCollectFiles(filePath);
-  }
-
-  @Override
-  public void submitMoveTask(BlockMovingInfo blkMovingInfo) throws IOException {
-    blockMoveTaskHandler.submitMoveTask(blkMovingInfo);
-  }
-
-  @Override
-  public void notifyMovementTriedBlocks(Block[] moveAttemptFinishedBlks) {
-    LOG.info("Movement attempted blocks: {}",
-        Arrays.asList(moveAttemptFinishedBlks));
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java
deleted file mode 100644
index 0473b9d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-
-/**
- * A specific implementation for scanning the directory with Namenode internal
- * Inode structure and collects the file ids under the given directory ID.
- */
-@InterfaceAudience.Private
-public class IntraSPSNameNodeFileIdCollector extends FSTreeTraverser
-    implements FileCollector {
-  private int maxQueueLimitToScan;
-  private final SPSService service;
-
-  private int remainingCapacity = 0;
-
-  private List<ItemInfo> currentBatch;
-
-  public IntraSPSNameNodeFileIdCollector(FSDirectory dir,
-      SPSService service) {
-    super(dir, service.getConf());
-    this.service = service;
-    this.maxQueueLimitToScan = service.getConf().getInt(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY,
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT);
-    currentBatch = new ArrayList<>(maxQueueLimitToScan);
-  }
-
-  @Override
-  protected boolean processFileInode(INode inode, TraverseInfo traverseInfo)
-      throws IOException, InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Processing {} for statisy the policy",
-          inode.getFullPathName());
-    }
-    if (!inode.isFile()) {
-      return false;
-    }
-    if (inode.isFile() && inode.asFile().numBlocks() != 0) {
-      currentBatch.add(new ItemInfo(
-          ((SPSTraverseInfo) traverseInfo).getStartId(), inode.getId()));
-      remainingCapacity--;
-    }
-    return true;
-  }
-
-  @Override
-  protected boolean shouldSubmitCurrentBatch() {
-    return remainingCapacity <= 0;
-  }
-
-  @Override
-  protected void checkINodeReady(long startId) throws IOException {
-    // SPS work won't be scheduled if NN is in standby. So, skipping NN
-    // standby check.
-    return;
-  }
-
-  @Override
-  protected void submitCurrentBatch(Long startId)
-      throws IOException, InterruptedException {
-    // Add current child's to queue
-    service.addAllFilesToProcess(startId,
-        currentBatch, false);
-    currentBatch.clear();
-  }
-
-  @Override
-  protected void throttle() throws InterruptedException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("StorageMovementNeeded queue remaining capacity is zero,"
-          + " waiting for some free slots.");
-    }
-    remainingCapacity = remainingCapacity();
-    // wait for queue to be free
-    while (remainingCapacity <= 0) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Waiting for storageMovementNeeded queue to be free!");
-      }
-      Thread.sleep(5000);
-      remainingCapacity = remainingCapacity();
-    }
-  }
-
-  @Override
-  protected boolean canTraverseDir(INode inode) throws IOException {
-    return true;
-  }
-
-  @Override
-  protected void checkPauseForTesting() throws InterruptedException {
-    // Nothing to do
-  }
-
-  @Override
-  public void scanAndCollectFiles(final long startINodeId)
-      throws IOException, InterruptedException {
-    FSDirectory fsd = getFSDirectory();
-    INode startInode = fsd.getInode(startINodeId);
-    if (startInode != null) {
-      remainingCapacity = remainingCapacity();
-      if (remainingCapacity == 0) {
-        throttle();
-      }
-      if (startInode.isFile()) {
-        currentBatch
-            .add(new ItemInfo(startInode.getId(), startInode.getId()));
-      } else {
-        readLock();
-        // NOTE: this lock will not be held for full directory scanning. It is
-        // basically a sliced locking. Once it collects a batch size( at max the
-        // size of maxQueueLimitToScan (default 1000)) file ids, then it will
-        // unlock and submits the current batch to SPSService. Once
-        // service.processingQueueSize() shows empty slots, then lock will be
-        // re-acquired and scan will be resumed. This logic was re-used from
-        // EDEK feature.
-        try {
-          traverseDir(startInode.asDirectory(), startINodeId,
-              HdfsFileStatus.EMPTY_NAME, new SPSTraverseInfo(startINodeId));
-        } finally {
-          readUnlock();
-        }
-      }
-      // Mark startInode traverse is done, this is last-batch
-      service.addAllFilesToProcess(startInode.getId(), currentBatch, true);
-      currentBatch.clear();
-    }
-  }
-
-  /**
-   * Returns queue remaining capacity.
-   */
-  public synchronized int remainingCapacity() {
-    int size = service.processingQueueSize();
-    int remainingSize = 0;
-    if (size < maxQueueLimitToScan) {
-      remainingSize = maxQueueLimitToScan - size;
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SPS processing Q -> maximum capacity:{}, current size:{},"
-          + " remaining size:{}", maxQueueLimitToScan, size, remainingSize);
-    }
-    return remainingSize;
-  }
-
-  class SPSTraverseInfo extends TraverseInfo {
-    private long startId;
-
-    SPSTraverseInfo(long startId) {
-      this.startId = startId;
-    }
-
-    public long getStartId() {
-      return startId;
-    }
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
index 86634d8..a62dd93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
@@ -102,11 +102,6 @@
   int processingQueueSize();
 
   /**
-   * Clear inodeId present in the processing queue.
-   */
-  void clearQueue(long spsPath);
-
-  /**
    * @return the configuration.
    */
   Configuration getConf();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
index 4af6c8f..7ebd23d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
@@ -43,14 +43,12 @@
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.balancer.Matcher;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
@@ -159,15 +157,6 @@
           serviceMode);
       return;
     }
-    if (serviceMode == StoragePolicySatisfierMode.INTERNAL
-        && ctxt.isMoverRunning()) {
-      isRunning = false;
-      LOG.error(
-          "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-              + HdfsServerConstants.MOVER_ID_PATH.toString()
-              + " been opened. Maybe a Mover instance is running!");
-      return;
-    }
     if (reconfigStart) {
       LOG.info("Starting {} StoragePolicySatisfier, as admin requested to "
           + "start it.", StringUtils.toLowerCase(serviceMode.toString()));
@@ -177,9 +166,6 @@
     }
 
     isRunning = true;
-    // Ensure that all the previously submitted block movements(if any) have to
-    // be stopped in all datanodes.
-    addDropSPSWorkCommandsToAllDNs();
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
     storagePolicySatisfierThread.start();
@@ -201,7 +187,6 @@
     this.storageMovementsMonitor.stop();
     if (forceStop) {
       storageMovementNeeded.clearQueuesWithNotification();
-      addDropSPSWorkCommandsToAllDNs();
     } else {
       LOG.info("Stopping StoragePolicySatisfier.");
     }
@@ -234,14 +219,6 @@
     return isRunning;
   }
 
-  /**
-   * Adding drop commands to all datanodes to stop performing the satisfier
-   * block movements, if any.
-   */
-  private void addDropSPSWorkCommandsToAllDNs() {
-    ctxt.addDropPreviousSPSWorkAtDNs();
-  }
-
   @Override
   public void run() {
     while (isRunning) {
@@ -1101,13 +1078,6 @@
   }
 
   /**
-   * Clear queues for given track id.
-   */
-  public void clearQueue(long trackId) {
-    storageMovementNeeded.clearQueue(trackId);
-  }
-
-  /**
    * This class contains information of an attempted blocks and its last
    * attempted or reported time stamp. This is used by
    * {@link BlockStorageMovementAttemptedItems#storageMovementAttemptedItems}.
@@ -1158,20 +1128,6 @@
     }
   }
 
-  /**
-   * Returns sps invoked path status. This method is used by internal satisfy
-   * storage policy service.
-   *
-   * @param path
-   *          sps path
-   * @return storage policy satisfy path status
-   * @throws IOException
-   */
-  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException {
-    return storageMovementNeeded.getStatus(ctxt.getFileID(path));
-  }
-
   @Override
   public void addFileToProcess(ItemInfo trackInfo, boolean scanCompleted) {
     storageMovementNeeded.add(trackInfo, scanCompleted);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
index 0507d6b..074eab6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
@@ -18,30 +18,27 @@
 package org.apache.hadoop.hdfs.server.namenode.sps;
 
 import java.io.IOException;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Queue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.sps.ExternalStoragePolicySatisfier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * This manages satisfy storage policy invoked path ids and expose methods to
- * process these path ids. It maintains sps mode(INTERNAL/EXTERNAL/NONE)
+ * process these path ids. It maintains sps mode(EXTERNAL/NONE)
  * configured by the administrator.
  *
  * <p>
- * If the configured mode is {@link StoragePolicySatisfierMode.INTERNAL}, then
- * it will start internal sps daemon service inside namenode and process sps
- * invoked path ids to satisfy the storage policy.
- *
- * <p>
  * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
  * it won't do anything, just maintains the sps invoked path ids. Administrator
  * requires to start external sps service explicitly, to fetch the sps invoked
@@ -66,10 +63,9 @@
   private final Queue<Long> pathsToBeTraveresed;
   private final int outstandingPathsLimit;
   private final Namesystem namesystem;
-  private final BlockManager blkMgr;
 
-  public StoragePolicySatisfyManager(Configuration conf, Namesystem namesystem,
-      BlockManager blkMgr) {
+  public StoragePolicySatisfyManager(Configuration conf,
+      Namesystem namesystem) {
     // StoragePolicySatisfier(SPS) configs
     storagePolicyEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
@@ -82,21 +78,16 @@
         DFSConfigKeys.DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT);
     mode = StoragePolicySatisfierMode.fromString(modeVal);
     pathsToBeTraveresed = new LinkedList<Long>();
+    this.namesystem = namesystem;
     // instantiate SPS service by just keeps config reference and not starting
     // any supporting threads.
     spsService = new StoragePolicySatisfier(conf);
-    this.namesystem = namesystem;
-    this.blkMgr = blkMgr;
   }
 
   /**
    * This function will do following logic based on the configured sps mode:
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.INTERNAL}, then
-   * starts internal daemon service inside namenode.
-   *
-   * <p>
    * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
    * it won't do anything. Administrator requires to start external sps service
    * explicitly.
@@ -113,17 +104,6 @@
     }
 
     switch (mode) {
-    case INTERNAL:
-      if (spsService.isRunning()) {
-        LOG.info("Storage policy satisfier is already running"
-            + " as internal daemon service inside namenode.");
-        return;
-      }
-      // starts internal daemon service inside namenode
-      spsService.init(
-          new IntraSPSNameNodeContext(namesystem, blkMgr, spsService));
-      spsService.start(false, mode);
-      break;
     case EXTERNAL:
       LOG.info("Storage policy satisfier is configured as external, "
           + "please start external sps service explicitly to satisfy policy");
@@ -141,10 +121,6 @@
    * This function will do following logic based on the configured sps mode:
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.INTERNAL}, then
-   * stops internal daemon service inside namenode.
-   *
-   * <p>
    * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
    * it won't do anything. Administrator requires to stop external sps service
    * explicitly, if needed.
@@ -162,16 +138,6 @@
     }
 
     switch (mode) {
-    case INTERNAL:
-      removeAllPathIds();
-      if (!spsService.isRunning()) {
-        LOG.info("Internal storage policy satisfier daemon service"
-            + " is not running");
-        return;
-      }
-      // stops internal daemon service running inside namenode
-      spsService.stop(false);
-      break;
     case EXTERNAL:
       removeAllPathIds();
       if (LOG.isDebugEnabled()) {
@@ -194,11 +160,8 @@
   }
 
   /**
-   * Sets new sps mode. If the new mode is internal, then it will start internal
-   * sps service inside namenode. If the new mode is external, then stops
-   * internal sps service running(if any) inside namenode. If the new mode is
-   * none, then it will disable the sps feature completely by clearing all
-   * queued up sps path's hint.
+   * Sets the new sps mode. If the new mode is none, it disables the sps
+   * feature completely by clearing the hints of all queued-up sps paths.
    */
   public void changeModeEvent(StoragePolicySatisfierMode newMode) {
     if (!storagePolicyEnabled) {
@@ -212,16 +175,6 @@
     }
 
     switch (newMode) {
-    case INTERNAL:
-      if (spsService.isRunning()) {
-        LOG.info("Storage policy satisfier is already running as {} mode.",
-            mode);
-        return;
-      }
-      spsService.init(new IntraSPSNameNodeContext(this.namesystem, this.blkMgr,
-          spsService));
-      spsService.start(true, newMode);
-      break;
     case EXTERNAL:
       if (mode == newMode) {
         LOG.info("Storage policy satisfier is already in mode:{},"
@@ -238,7 +191,7 @@
       }
       LOG.info("Disabling StoragePolicySatisfier, mode:{}", newMode);
       spsService.stop(true);
-      removeAllPathIds();
+      clearPathIds();
       break;
     default:
       if (LOG.isDebugEnabled()) {
@@ -252,77 +205,15 @@
   }
 
   /**
-   * This function will do following logic based on the configured sps mode:
-   *
-   * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.INTERNAL}, then
-   * timed wait to stop internal storage policy satisfier daemon threads.
-   *
-   * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
-   * it won't do anything, just ignore it.
-   *
-   * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the
-   * service is disabled. It won't do any action, just ignore it.
-   */
-  public void stopGracefully() {
-    switch (mode) {
-    case INTERNAL:
-      spsService.stopGracefully();
-      break;
-    case EXTERNAL:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Ignoring, StoragePolicySatisfier feature is running"
-            + " outside namenode");
-      }
-      break;
-    case NONE:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Ignoring, StoragePolicySatisfier feature is disabled");
-      }
-      break;
-    default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Invalid mode:{}", mode);
-      }
-      break;
-    }
-  }
-
-  /**
    * @return true if the internal storage policy satisfier daemon is running,
    *         false otherwise.
    */
-  public boolean isInternalSatisfierRunning() {
+  @VisibleForTesting
+  public boolean isSatisfierRunning() {
     return spsService.isRunning();
   }
 
   /**
-   * @return internal SPS service instance.
-   */
-  public SPSService getInternalSPSService() {
-    return this.spsService;
-  }
-
-  /**
-   * @return status Storage policy satisfy status of the path. It is supported
-   *         only for the internal sps daemon service.
-   * @throws IOException
-   *           if the Satisfier is not running inside namenode.
-   */
-  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
-      String path) throws IOException {
-    if (mode != StoragePolicySatisfierMode.INTERNAL) {
-      LOG.debug("Satisfier is not running inside namenode, so status "
-          + "can't be returned.");
-      throw new IOException("Satisfier is not running inside namenode, "
-          + "so status can't be returned.");
-    }
-    return spsService.checkStoragePolicySatisfyPathStatus(path);
-  }
-
-  /**
    * @return the next SPS path id, on which path users has invoked to satisfy
    *         storages.
    */
@@ -348,10 +239,22 @@
 
   /**
    * Removes the SPS path id from the list of sps paths.
+   *
+   * Any IOException hit while removing an xattr hint is logged and skipped.
    */
-  public void removePathId(long trackId) {
+  private void clearPathIds() {
     synchronized (pathsToBeTraveresed) {
-      pathsToBeTraveresed.remove(trackId);
+      Iterator<Long> iterator = pathsToBeTraveresed.iterator();
+      while (iterator.hasNext()) {
+        Long trackId = iterator.next();
+        try {
+          namesystem.removeXattr(trackId,
+              HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
+        } catch (IOException e) {
+          LOG.debug("Failed to remove sps xattr!", e);
+        }
+        iterator.remove();
+      }
     }
   }
 
@@ -374,12 +277,11 @@
   }
 
   /**
-   * @return true if sps is configured as an internal service or external
+   * @return true if sps is configured as an external
    *         service, false otherwise.
    */
   public boolean isEnabled() {
-    return mode == StoragePolicySatisfierMode.INTERNAL
-        || mode == StoragePolicySatisfierMode.EXTERNAL;
+    return mode == StoragePolicySatisfierMode.EXTERNAL;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
index 189bc2b..3293035 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
@@ -150,11 +150,6 @@
   }
 
   @Override
-  public void addDropPreviousSPSWorkAtDNs() {
-    // Nothing todo
-  }
-
-  @Override
   public void removeSPSHint(long inodeId) throws IOException {
     Path filePath = DFSUtilClient.makePathFromFileId(inodeId);
     nnc.getDistributedFileSystem().removeXAttr(filePath,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
index af90f0d8..8e19a7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
@@ -68,15 +68,6 @@
       StoragePolicySatisfier sps = new StoragePolicySatisfier(spsConf);
       nnc = getNameNodeConnector(spsConf);
 
-      boolean spsRunning;
-      spsRunning = nnc.getDistributedFileSystem().getClient()
-          .isInternalSatisfierRunning();
-      if (spsRunning) {
-        throw new RuntimeException(
-            "Startup failed due to StoragePolicySatisfier"
-                + " running inside Namenode.");
-      }
-
       ExternalSPSContext context = new ExternalSPSContext(sps, nnc);
       sps.init(context);
       sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index d8392fa..e02208c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
@@ -34,7 +33,6 @@
 import org.apache.hadoop.util.ToolRunner;
 
 import java.io.FileNotFoundException;
-import com.google.common.base.Joiner;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -297,88 +295,6 @@
         dfs.satisfyStoragePolicy(new Path(path));
         System.out.println("Scheduled blocks to move based on the current"
             + " storage policy on " + path);
-        boolean waitOpt = StringUtils.popOption("-w", args);
-        if (waitOpt) {
-          waitForSatisfyPolicy(dfs, path);
-        }
-      } catch (Exception e) {
-        System.err.println(AdminHelper.prettifyException(e));
-        return 2;
-      }
-      return 0;
-    }
-
-    private void waitForSatisfyPolicy(DistributedFileSystem dfs, String path)
-        throws IOException {
-      System.out.println("Waiting for satisfy the policy ...");
-      boolean running = true;
-      while (running) {
-        StoragePolicySatisfyPathStatus status = dfs.getClient()
-            .checkStoragePolicySatisfyPathStatus(path);
-        switch (status) {
-        case SUCCESS:
-        case FAILURE:
-        case NOT_AVAILABLE:
-          System.out.println(status);
-          running = false;
-          break;
-        case PENDING:
-        case IN_PROGRESS:
-          System.out.println(status);
-        default:
-          System.err.println("Unexpected storage policy satisfyer status,"
-              + " Exiting");
-          running = false;
-          break;
-        }
-
-        try {
-          Thread.sleep(10000);
-        } catch (InterruptedException e) {
-        }
-      }
-      System.out.println(" done");
-    }
-  }
-
-  /**
-   * Command to check storage policy satisfier status running internal(inside)
-   * Namenode.
-   */
-  private static class IsInternalSatisfierRunningCommand
-      implements AdminHelper.Command {
-    @Override
-    public String getName() {
-      return "-isInternalSatisfierRunning";
-    }
-
-    @Override
-    public String getShortUsage() {
-      return "[" + getName() + "]\n";
-    }
-
-    @Override
-    public String getLongUsage() {
-      return getShortUsage() + "\n"
-          + "Check the status of Storage Policy Statisfier"
-          + " running inside Namenode.\n\n";
-    }
-
-    @Override
-    public int run(Configuration conf, List<String> args) throws IOException {
-      if (!args.isEmpty()) {
-        System.err.print("Can't understand arguments: "
-            + Joiner.on(" ").join(args) + "\n");
-        System.err.println("Usage is " + getLongUsage());
-        return 1;
-      }
-      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
-      try {
-        if(dfs.getClient().isInternalSatisfierRunning()){
-          System.out.println("yes");
-        }else{
-          System.out.println("no");
-        }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 2;
@@ -438,7 +354,6 @@
       new SetStoragePolicyCommand(),
       new GetStoragePolicyCommand(),
       new UnsetStoragePolicyCommand(),
-      new SatisfyStoragePolicyCommand(),
-      new IsInternalSatisfierRunningCommand()
+      new SatisfyStoragePolicyCommand()
   };
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index baf7ec7..4a8f9f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -60,8 +60,6 @@
     NullDatanodeCommand = 7;
     BlockIdCommand = 8;
     BlockECReconstructionCommand = 9;
-    BlockStorageMovementCommand = 10;
-    DropSPSWorkCommand = 11;
   }
 
   required Type cmdType = 1;    // Type of the command
@@ -76,8 +74,6 @@
   optional RegisterCommandProto registerCmd = 7;
   optional BlockIdCommandProto blkIdCmd = 8;
   optional BlockECReconstructionCommandProto blkECReconstructionCmd = 9;
-  optional BlockStorageMovementCommandProto blkStorageMovementCmd = 10;
-  optional DropSPSWorkCommandProto dropSPSWorkCmd = 11;
 }
 
 /**
@@ -158,32 +154,6 @@
   repeated BlockECReconstructionInfoProto blockECReconstructioninfo = 1;
 }
 
- /**
- * Block storage movement command
- */
-message BlockStorageMovementCommandProto {
-  required string blockPoolId = 1;
-  repeated BlockMovingInfoProto blockMovingInfo = 2;
-}
-
-/**
- * Instruct datanode to drop SPS work queues
- */
-message DropSPSWorkCommandProto {
-  // void
-}
-
-/**
- * Block storage movement information
- */
-message BlockMovingInfoProto {
-  required BlockProto block = 1;
-  required DatanodeInfoProto sourceDnInfo = 2;
-  required DatanodeInfoProto targetDnInfo = 3;
-  required StorageTypeProto sourceStorageType = 4;
-  required StorageTypeProto targetStorageType = 5;
-}
-
 /**
  * registration - Information of the datanode registering with the namenode
  */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0b533c2..f720d0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4501,8 +4501,7 @@
   <name>dfs.storage.policy.satisfier.mode</name>
   <value>none</value>
   <description>
-    Following values are supported - internal, external, none.
-    If internal, StoragePolicySatisfier will be enabled and started along with active namenode.
+    Following values are supported - external, none.
     If external, StoragePolicySatisfier will be enabled and started as an independent service outside namenode.
     If none, StoragePolicySatisfier is disabled.
     By default, StoragePolicySatisfier is disabled.
@@ -4561,17 +4560,6 @@
 </property>
 
 <property>
-  <name>dfs.storage.policy.satisfier.low.max-streams.preference</name>
-  <value>true</value>
-  <description>
-    If false, blocks to move tasks will share equal ratio of number of highest-priority
-    replication streams (dfs.namenode.replication.max-streams) with pending replica and
-    erasure-coded reconstruction tasks. If true, blocks to move tasks will only use
-    the delta number of replication streams. The default value is true.
-  </description>
-</property>
-
-<property>
   <name>dfs.storage.policy.satisfier.retry.max.attempts</name>
   <value>3</value>
   <description>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 5872ef8..3789779 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -106,9 +106,9 @@
 ### <u>S</u>torage <u>P</u>olicy <u>S</u>atisfier (SPS)
 
 When user changes the storage policy on a file/directory, user can call `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the new policy set.
-The SPS daemon thread runs along with namenode and periodically scans for the storage mismatches between new policy set and the physical blocks placed. This will only track the files/directories for which user invoked satisfyStoragePolicy. If SPS identifies some blocks to be moved for a file, then it will schedule block movement tasks to datanodes. If there are any failures in movement, the SPS will re-attempt by sending new block movement tasks.
+The SPS tool runs external to the namenode and periodically scans for mismatches between the newly set storage policy and the actual placement of blocks. It only tracks the files/directories for which the user invoked satisfyStoragePolicy. If SPS identifies blocks that need to be moved for a file, it schedules block movement tasks to the datanodes. If any movements fail, SPS re-attempts them by sending new block movement tasks.
 
-SPS can be enabled as internal service to Namenode or as an external service outside Namenode or disabled dynamically without restarting the Namenode.
+SPS can be enabled as an external service outside Namenode or disabled dynamically without restarting the Namenode.
 
 Detailed design documentation can be found at [Storage Policy Satisfier(SPS) (HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
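
As a rough illustration of the client-side flow described above, the following is a minimal sketch (the path and policy name are placeholders; it assumes `fs.defaultFS` points at the target cluster):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SatisfyPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    Path file = new Path("/data/reports/file1"); // illustrative path

    // Change the storage policy first, then ask SPS to move the
    // already-written blocks so that they match the new policy.
    FileSystem.get(conf).setStoragePolicy(file, "COLD");
    admin.satisfyStoragePolicy(file);
  }
}
```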
 
@@ -125,8 +125,8 @@
 
 ####Configurations:
 
-*   **dfs.storage.policy.satisfier.mode** - Used to enable(internal service inside NN or external service outside NN) or disable SPS.
-   Following string values are supported - `internal`, `external`, `none`. Configuring `internal` or `external` value represents SPS is enable and `none` to disable.
+*   **dfs.storage.policy.satisfier.mode** - Used to enable the external SPS service outside the NN or to disable SPS.
+   The following string values are supported - `external`, `none`. Configuring `external` enables SPS and `none` disables it.
    The default value is `none`.
 
 *   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to re-check the processed block storage movement
@@ -218,25 +218,17 @@
 
 * Command:
 
-        hdfs storagepolicies -satisfyStoragePolicy [-w] -path <path>
+        hdfs storagepolicies -satisfyStoragePolicy -path <path>
 
 * Arguments:
 
 | | |
 |:---- |:---- |
 | `-path <path>` | The path referring to either a directory or a file. |
-| `-w` | It requests that the command wait till all the files satisfy the policy in given path. This will print the current status of the path in each 10 sec and status are:<br/>PENDING - Path is in queue and not processed for satisfying the policy.<br/>IN_PROGRESS - Satisfying the storage policy for path.<br/>SUCCESS - Storage policy satisfied for the path.<br/>FAILURE : Few blocks failed to move.<br/>NOT_AVAILABLE - Status not available. |
 
-### SPS Running Status
 
-Check the running status of Storage Policy Satisfier service in namenode. If it is running, return 'yes'. Otherwise return 'no'.
-
-* Command:
-
-        hdfs storagepolicies -isInternalSatisfierRunning
-
-### Enable(internal service inside NN or external service outside NN) or Disable SPS without restarting Namenode
-If administrator wants to switch modes of SPS feature while Namenode is running, first he/she needs to update the desired value(internal or external or none) for the configuration item `dfs.storage.policy.satisfier.mode` in configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
+### Enable external service outside NN or disable SPS without restarting Namenode
+If the administrator wants to switch the SPS mode while the Namenode is running, first update the configuration item `dfs.storage.policy.satisfier.mode` with the desired value (external or none) in the configuration file (`hdfs-site.xml`) and then run the following Namenode reconfig command
 
 * Command:
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index bab37e4..fb4616a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -59,6 +59,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -139,6 +140,7 @@
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -165,6 +167,7 @@
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.XAttrStorage;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -193,6 +196,7 @@
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Assume;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -2491,4 +2495,40 @@
       }
     }, 100, timeout);
   }
+
+  /**
+   * Get namenode connector using the given configuration and file path.
+   *
+   * @param conf
+   *          hdfs configuration
+   * @param filePath
+   *          file path
+   * @param namenodeCount
+   *          number of namenodes
+   * @param createMoverPath
+   *          whether to create the mover ID path; when false, path creation is skipped
+   * @return Namenode connector.
+   * @throws IOException
+   */
+  public static NameNodeConnector getNameNodeConnector(Configuration conf,
+      Path filePath, int namenodeCount, boolean createMoverPath)
+          throws IOException {
+    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
+    Assert.assertEquals(namenodeCount, namenodes.size());
+    NameNodeConnector.checkOtherInstanceRunning(createMoverPath);
+    while (true) {
+      try {
+        final List<NameNodeConnector> nncs = NameNodeConnector
+            .newNameNodeConnectors(namenodes,
+                StoragePolicySatisfier.class.getSimpleName(),
+                filePath, conf,
+                NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
+        return nncs.get(0);
+      } catch (IOException e) {
+        LOG.warn("Failed to connect with namenode", e);
+        // Ignore
+      }
+    }
+  }
+
 }
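
The helper added above is what the reworked tests use to stand in for an external SPS. Below is a hedged sketch of that wiring, mirroring the TestPersistentStoragePolicySatisfier changes further down in this patch (class and method names come from this change; the surrounding test scaffolding is assumed):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;

public class ExternalSpsTestWiring {
  // Starts an external SPS instance against an already-running mini cluster.
  static StoragePolicySatisfier startExternalSps(Configuration conf)
      throws Exception {
    NameNodeConnector nnc = DFSTestUtil.getNameNodeConnector(conf,
        HdfsServerConstants.MOVER_ID_PATH, 1, false);
    StoragePolicySatisfier sps = new StoragePolicySatisfier(conf);
    sps.init(new ExternalSPSContext(sps, nnc));
    sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
    return sps;
  }
}
```

A test using this would call `sps.stopGracefully()` in its teardown, as the modified `clusterShutdown()` below does.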
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index d0c3a83..4863ca1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -123,8 +123,6 @@
     Mockito.doReturn(new DNConf(mockDn)).when(mockDn).getDnConf();
     Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
         .when(mockDn).getMetrics();
-    Mockito.doReturn(new StoragePolicySatisfyWorker(conf, mockDn, null))
-        .when(mockDn).getStoragePolicySatisfyWorker();
 
     // Set up a simulated dataset with our fake BP
     mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf));
@@ -378,8 +376,6 @@
     Mockito.doReturn(new DNConf(mockDn)).when(mockDn).getDnConf();
     Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).
       when(mockDn).getMetrics();
-    Mockito.doReturn(new StoragePolicySatisfyWorker(conf, mockDn, null))
-        .when(mockDn).getStoragePolicySatisfyWorker();
     final AtomicInteger count = new AtomicInteger();
     Mockito.doAnswer(new Answer<Void>() {
       @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
deleted file mode 100644
index 51d3254..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStoragePolicySatisfyWorker.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-
-/**
- * This class tests the behavior of moving block replica to the given storage
- * type to fulfill the storage policy requirement.
- */
-public class TestStoragePolicySatisfyWorker {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestStoragePolicySatisfyWorker.class);
-  private static final int DEFAULT_BLOCK_SIZE = 100;
-  private MiniDFSCluster cluster = null;
-  private final Configuration conf = new HdfsConfiguration();
-
-  private static void initConf(Configuration conf) {
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
-    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
-        1L);
-    conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
-    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
-    // Reduced refresh cycle to update latest datanodes.
-    conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
-        1000);
-  }
-
-  @Before
-  public void setUp() throws IOException {
-    initConf(conf);
-  }
-
-  @After
-  public void teardown() throws IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Tests to verify that the block replica is moving to ARCHIVE storage type to
-   * fulfill the storage policy requirement.
-   */
-  @Test(timeout = 120000)
-  public void testMoveSingleBlockToAnotherDatanode() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
-        .storageTypes(
-            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.ARCHIVE, StorageType.ARCHIVE},
-                {StorageType.ARCHIVE, StorageType.ARCHIVE}})
-        .build();
-    cluster.waitActive();
-    final DistributedFileSystem dfs = cluster.getFileSystem();
-    final String file = "/testMoveSingleBlockToAnotherDatanode";
-    // write to DISK
-    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
-    out.writeChars("testMoveSingleBlockToAnotherDatanode");
-    out.close();
-
-    // verify before movement
-    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-    StorageType[] storageTypes = lb.getStorageTypes();
-    for (StorageType storageType : storageTypes) {
-      Assert.assertTrue(StorageType.DISK == storageType);
-    }
-    // move to ARCHIVE
-    dfs.setStoragePolicy(new Path(file), "COLD");
-
-    dfs.satisfyStoragePolicy(new Path(file));
-
-    cluster.triggerHeartbeats();
-
-    // Wait till NameNode notified about the block location details
-    waitForLocatedBlockWithArchiveStorageType(dfs, file, 2, 30000);
-  }
-
-  /**
-   * Test to verify that satisfy worker can't move blocks. If specified target
-   * datanode doesn't have enough space to accommodate the moving block.
-   */
-  @Test(timeout = 120000)
-  public void testMoveWithNoSpaceAvailable() throws Exception {
-    final long capacity = 150;
-    final String rack0 = "/rack0";
-    final String rack1 = "/rack1";
-    long[] capacities = new long[] {capacity, capacity, capacity / 2};
-    String[] hosts = {"host0", "host1", "host2"};
-    String[] racks = {rack0, rack1, rack0};
-    int numOfDatanodes = capacities.length;
-
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numOfDatanodes)
-        .hosts(hosts).racks(racks).simulatedCapacities(capacities)
-        .storageTypes(
-            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.DISK, StorageType.ARCHIVE},
-                {StorageType.ARCHIVE, StorageType.ARCHIVE}})
-        .build();
-
-    cluster.waitActive();
-    InetSocketAddress[] favoredNodes = new InetSocketAddress[3];
-    for (int i = 0; i < favoredNodes.length; i++) {
-      // DFSClient will attempt reverse lookup. In case it resolves
-      // "127.0.0.1" to "localhost", we manually specify the hostname.
-      favoredNodes[i] = cluster.getDataNodes().get(i).getXferAddress();
-    }
-    final DistributedFileSystem dfs = cluster.getFileSystem();
-    final String file = "/testMoveWithNoSpaceAvailable";
-    DFSTestUtil.createFile(dfs, new Path(file), false, 1024, 100,
-        DEFAULT_BLOCK_SIZE, (short) 2, 0, false, favoredNodes);
-
-    // verify before movement
-    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-    StorageType[] storageTypes = lb.getStorageTypes();
-    for (StorageType storageType : storageTypes) {
-      Assert.assertTrue(StorageType.DISK == storageType);
-    }
-
-    // move to ARCHIVE
-    dfs.setStoragePolicy(new Path(file), "COLD");
-
-    lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-    DataNode src = cluster.getDataNodes().get(2);
-    DatanodeInfo targetDnInfo = DFSTestUtil
-        .getLocalDatanodeInfo(src.getXferPort());
-
-    SimpleBlocksMovementsStatusHandler handler =
-        new SimpleBlocksMovementsStatusHandler();
-    StoragePolicySatisfyWorker worker = new StoragePolicySatisfyWorker(conf,
-        src, handler);
-    try {
-      worker.start();
-      List<BlockMovingInfo> blockMovingInfos = new ArrayList<>();
-      BlockMovingInfo blockMovingInfo = prepareBlockMovingInfo(
-          lb.getBlock().getLocalBlock(), lb.getLocations()[0], targetDnInfo,
-          lb.getStorageTypes()[0], StorageType.ARCHIVE);
-      blockMovingInfos.add(blockMovingInfo);
-      worker.processBlockMovingTasks(cluster.getNamesystem().getBlockPoolId(),
-          blockMovingInfos);
-      waitForBlockMovementCompletion(handler, 1, 30000);
-    } finally {
-      worker.stop();
-    }
-  }
-
-  private void waitForBlockMovementCompletion(
-      final SimpleBlocksMovementsStatusHandler handler,
-      int expectedFinishedItemsCount, int timeout) throws Exception {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        List<Block> completedBlocks = handler.getMoveAttemptFinishedBlocks();
-        int finishedCount = completedBlocks.size();
-        LOG.info("Block movement completed count={}, expected={} and actual={}",
-            completedBlocks.size(), expectedFinishedItemsCount, finishedCount);
-        return expectedFinishedItemsCount == finishedCount;
-      }
-    }, 100, timeout);
-  }
-
-  private void waitForLocatedBlockWithArchiveStorageType(
-      final DistributedFileSystem dfs, final String file,
-      int expectedArchiveCount, int timeout) throws Exception {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        LocatedBlock lb = null;
-        try {
-          lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-        } catch (IOException e) {
-          LOG.error("Exception while getting located blocks", e);
-          return false;
-        }
-
-        int archiveCount = 0;
-        for (StorageType storageType : lb.getStorageTypes()) {
-          if (StorageType.ARCHIVE == storageType) {
-            archiveCount++;
-          }
-        }
-        LOG.info("Archive replica count, expected={} and actual={}",
-            expectedArchiveCount, archiveCount);
-        return expectedArchiveCount == archiveCount;
-      }
-    }, 100, timeout);
-  }
-
-  private BlockMovingInfo prepareBlockMovingInfo(Block block,
-      DatanodeInfo src, DatanodeInfo destin, StorageType storageType,
-      StorageType targetStorageType) {
-    return new BlockMovingInfo(block, src, destin, storageType,
-        targetStorageType);
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 900dcdb..200178d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -678,7 +678,7 @@
   public void testMoveWhenStoragePolicySatisfierIsRunning() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .storageTypes(
@@ -686,6 +686,9 @@
                 {StorageType.DISK}}).build();
     try {
       cluster.waitActive();
+      // Simulate external SPS by creating a NameNodeConnector instance (acquires the mover ID path).
+      DFSTestUtil.getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH,
+          1, true);
       final DistributedFileSystem dfs = cluster.getFileSystem();
       final String file = "/testMoveWhenStoragePolicySatisfierIsRunning";
       // write to DISK
@@ -697,7 +700,7 @@
       dfs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file.toString()});
-      int exitcode = ExitStatus.SKIPPED_DUE_TO_SPS.getExitCode();
+      int exitcode = ExitStatus.IO_EXCEPTION.getExitCode();
       Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index ee0b2e6..0a1b129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -248,17 +248,17 @@
 
     // enable SPS internally by keeping DFS_STORAGE_POLICY_ENABLED_KEY
     nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
 
     // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
     assertNull("SPS shouldn't start as "
         + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled",
             nameNode.getNamesystem().getBlockManager().getSPSManager());
     verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL, false);
+        StoragePolicySatisfierMode.EXTERNAL, false);
 
     assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-        StoragePolicySatisfierMode.INTERNAL.toString(), nameNode.getConf()
+        StoragePolicySatisfierMode.EXTERNAL.toString(), nameNode.getConf()
             .get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
             DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT));
   }
@@ -285,12 +285,6 @@
           e.getCause());
     }
 
-    // enable internal SPS
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
-    verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL, true);
-
     // disable SPS
     nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
         StoragePolicySatisfierMode.NONE.toString());
@@ -302,7 +296,7 @@
         StoragePolicySatisfierMode.EXTERNAL.toString());
     assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
         false, nameNode.getNamesystem().getBlockManager().getSPSManager()
-            .isInternalSatisfierRunning());
+            .isSatisfierRunning());
     assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
         StoragePolicySatisfierMode.EXTERNAL.toString(),
         nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -337,27 +331,15 @@
               + " by admin. Seek for an admin help to enable it "
               + "or use Mover tool.", e);
     }
-
-    // start internal
-    nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-        true, nameNode.getNamesystem().getBlockManager().getSPSManager()
-            .isInternalSatisfierRunning());
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-        StoragePolicySatisfierMode.INTERNAL.toString(),
-        nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-            DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT));
   }
 
   void verifySPSEnabled(final NameNode nameNode, String property,
       StoragePolicySatisfierMode expected, boolean isSatisfierRunning) {
     StoragePolicySatisfyManager spsMgr = nameNode
             .getNamesystem().getBlockManager().getSPSManager();
-    boolean isInternalSatisfierRunning = spsMgr != null
-        ? spsMgr.isInternalSatisfierRunning() : false;
-    assertEquals(property + " has wrong value", isSatisfierRunning,
-        isInternalSatisfierRunning);
+    boolean isSPSRunning = spsMgr != null ? spsMgr.isSatisfierRunning()
+        : false;
+    assertEquals(property + " has wrong value", isSatisfierRunning, isSPSRunning);
     String actual = nameNode.getConf().get(property,
         DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
     assertEquals(property + " has wrong value", expected,
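
For reference, the reconfiguration path these assertions exercise reduces to flipping the mode property on a live NameNode. A minimal sketch, assuming an active NameNode handle from the test harness:

```java
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

final class SpsModeReconfigSketch {
  // Switches SPS between EXTERNAL and NONE on a running active NameNode.
  static void setSpsMode(NameNode nameNode, StoragePolicySatisfierMode mode)
      throws Exception {
    nameNode.reconfigureProperty(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
        mode.toString());
  }
}
```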
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index e079471..2ad8640 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -29,7 +29,11 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
@@ -45,11 +49,13 @@
  * Test persistence of satisfying files/directories.
  */
 public class TestPersistentStoragePolicySatisfier {
-
   private static Configuration conf;
 
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
+  private NameNodeConnector nnc;
+  private StoragePolicySatisfier sps;
+  private ExternalSPSContext ctxt;
 
   private static Path testFile =
       new Path("/testFile");
@@ -65,7 +71,6 @@
   private static final String COLD = "COLD";
   private static final String WARM = "WARM";
   private static final String ONE_SSD = "ONE_SSD";
-  private static final String ALL_SSD = "ALL_SSD";
 
   private static StorageType[][] storageTypes = new StorageType[][] {
       {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD},
@@ -104,7 +109,7 @@
         DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
         "3000");
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
     // Reduced refresh cycle to update latest datanodes.
     conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
         1000);
@@ -124,6 +129,14 @@
     } else {
       fs = cluster.getFileSystem();
     }
+    nnc = DFSTestUtil.getNameNodeConnector(conf,
+        HdfsServerConstants.MOVER_ID_PATH, 1, false);
+
+    sps = new StoragePolicySatisfier(conf);
+    ctxt = new ExternalSPSContext(sps, nnc);
+
+    sps.init(ctxt);
+    sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
 
     createTestFiles(fs, replication);
   }
@@ -158,6 +171,9 @@
       cluster.shutdown(true);
       cluster = null;
     }
+    if (sps != null) {
+      sps.stopGracefully();
+    }
   }
 
   /**
@@ -203,49 +219,6 @@
   }
 
   /**
-   * Tests to verify satisfier persistence working as expected
-   * in HA env. This test case runs as below:
-   * 1. setup HA cluster env with simple HA topology.
-   * 2. switch the active NameNode from nn0/nn1 to nn1/nn0.
-   * 3. make sure all the storage policies are satisfied.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testWithHA() throws Exception {
-    try {
-      // Enable HA env for testing.
-      clusterSetUp(true, new HdfsConfiguration());
-
-      fs.setStoragePolicy(testFile, ALL_SSD);
-      fs.satisfyStoragePolicy(testFile);
-
-      cluster.transitionToStandby(0);
-      cluster.transitionToActive(1);
-
-      DFSTestUtil.waitExpectedStorageType(
-          testFileName, StorageType.SSD, 3, timeout, fs);
-
-      // test directory
-      fs.setStoragePolicy(parentDir, WARM);
-      fs.satisfyStoragePolicy(parentDir);
-      cluster.transitionToStandby(1);
-      cluster.transitionToActive(0);
-
-      DFSTestUtil.waitExpectedStorageType(
-          parentFileName, StorageType.DISK, 1, timeout, fs);
-      DFSTestUtil.waitExpectedStorageType(
-          parentFileName, StorageType.ARCHIVE, 2, timeout, fs);
-      DFSTestUtil.waitExpectedStorageType(
-          childFileName, StorageType.DISK, 1, timeout, fs);
-      DFSTestUtil.waitExpectedStorageType(
-          childFileName, StorageType.ARCHIVE, 2, timeout, fs);
-    } finally {
-      clusterShutdown();
-    }
-  }
-
-
-  /**
    * Tests to verify satisfier persistence working well with multiple
    * restarts operations. This test case runs as below:
    * 1. satisfy the storage policy of file1.
@@ -282,63 +255,6 @@
   }
 
   /**
-   * Tests to verify satisfier persistence working well with
-   * federal HA env. This test case runs as below:
-   * 1. setup HA test environment with federal topology.
-   * 2. satisfy storage policy of file1.
-   * 3. switch active NameNode from nn0 to nn1.
-   * 4. switch active NameNode from nn2 to nn3.
-   * 5. check whether the storage policy of file1 is satisfied.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testWithFederationHA() throws Exception {
-    MiniDFSCluster haCluster = null;
-    try {
-      conf = new HdfsConfiguration();
-      conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-          StoragePolicySatisfierMode.INTERNAL.toString());
-      // Reduced refresh cycle to update latest datanodes.
-      conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
-          1000);
-      haCluster = new MiniDFSCluster
-          .Builder(conf)
-          .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
-          .storageTypes(storageTypes)
-          .numDataNodes(storageTypes.length).build();
-      haCluster.waitActive();
-      haCluster.transitionToActive(1);
-      haCluster.transitionToActive(3);
-
-      fs = HATestUtil.configureFailoverFs(haCluster, conf);
-      createTestFiles(fs, (short) 3);
-
-      fs.setStoragePolicy(testFile, WARM);
-      fs.satisfyStoragePolicy(testFile);
-
-      haCluster.transitionToStandby(1);
-      haCluster.transitionToActive(0);
-      haCluster.transitionToStandby(3);
-      haCluster.transitionToActive(2);
-
-      DFSTestUtil.waitExpectedStorageType(
-          testFileName, StorageType.DISK, 1, timeout, fs);
-      DFSTestUtil.waitExpectedStorageType(
-          testFileName, StorageType.ARCHIVE, 2, timeout, fs);
-
-    } finally {
-      if(fs != null) {
-        fs.close();
-        fs = null;
-      }
-      if(haCluster != null) {
-        haCluster.shutdown(true);
-        haCluster = null;
-      }
-    }
-  }
-
-  /**
    * Tests to verify SPS xattr will be removed if the satisfy work has
    * been finished, expect that the method satisfyStoragePolicy can be
    * invoked on the same file again after the block movement has been
@@ -388,7 +304,7 @@
    * 3. make sure sps xattr is removed.
    * @throws Exception
    */
-  @Test(timeout = 300000)
+  @Test(timeout = 300000)
   public void testDropSPS() throws Exception {
     try {
       clusterSetUp();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
index 0cadc83..cf04db0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
@@ -17,11 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.util.Time.monotonicNow;
-
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
@@ -32,24 +28,15 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Tests that StoragePolicySatisfier is able to work with HA enabled.
  */
 public class TestStoragePolicySatisfierWithHA {
   private MiniDFSCluster cluster = null;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestStoragePolicySatisfierWithHA.class);
 
   private final Configuration config = new HdfsConfiguration();
   private static final int DEFAULT_BLOCK_SIZE = 1024;
@@ -67,7 +54,7 @@
   private void createCluster() throws IOException {
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     config.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
     // Reduced refresh cycle to update latest datanodes.
     config.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
         1000);
@@ -101,50 +88,19 @@
   public void testWhenNNHAStateChanges() throws IOException {
     try {
       createCluster();
-      boolean running;
-
-      dfs = cluster.getFileSystem(1);
-
-      try {
-        dfs.getClient().isInternalSatisfierRunning();
-        Assert.fail("Call this function to Standby NN should "
-            + "raise an exception.");
-      } catch (RemoteException e) {
-        IOException cause = e.unwrapRemoteException();
-        if (!(cause instanceof StandbyException)) {
-          Assert.fail("Unexpected exception happened " + e);
-        }
-      }
-
-      cluster.transitionToActive(0);
-      dfs = cluster.getFileSystem(0);
-      running = dfs.getClient().isInternalSatisfierRunning();
-      Assert.assertTrue("StoragePolicySatisfier should be active "
-          + "when NN transits from Standby to Active mode.", running);
-
       // NN transits from Active to Standby
       cluster.transitionToStandby(0);
-      try {
-        dfs.getClient().isInternalSatisfierRunning();
-        Assert.fail("NN in Standby again, call this function should "
-            + "raise an exception.");
-      } catch (RemoteException e) {
-        IOException cause = e.unwrapRemoteException();
-        if (!(cause instanceof StandbyException)) {
-          Assert.fail("Unexpected exception happened " + e);
-        }
-      }
-
+      cluster.waitActive();
       try {
         cluster.getNameNode(0).reconfigurePropertyImpl(
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-            StoragePolicySatisfierMode.EXTERNAL.toString());
+            StoragePolicySatisfierMode.NONE.toString());
         Assert.fail("It's not allowed to enable or disable"
             + " StoragePolicySatisfier on Standby NameNode");
       } catch (ReconfigurationException e) {
         GenericTestUtils.assertExceptionContains("Could not change property "
             + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY
-            + " from 'INTERNAL' to 'EXTERNAL'", e);
+            + " from 'EXTERNAL' to 'NONE'", e);
         GenericTestUtils.assertExceptionContains(
             "Enabling or disabling storage policy satisfier service on "
                 + "standby NameNode is not allowed", e.getCause());
@@ -153,104 +109,4 @@
       cluster.shutdown();
     }
   }
-
-  /**
-   * Test to verify that during namenode switch over will add
-   * DNA_DROP_SPS_WORK_COMMAND to all the datanodes. Later, this will ensure to
-   * drop all the SPS queues at datanode.
-   */
-  @Test(timeout = 90000)
-  public void testNamenodeSwitchoverShouldDropSPSWork() throws Exception {
-    try {
-      createCluster();
-
-      FSNamesystem fsn = cluster.getNamesystem(0);
-      ArrayList<DataNode> dataNodes = cluster.getDataNodes();
-      List<DatanodeDescriptor> listOfDns = new ArrayList<>();
-      for (DataNode dn : dataNodes) {
-        DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn,
-            dn.getDatanodeId());
-        listOfDns.add(dnd);
-      }
-      cluster.shutdownDataNodes();
-
-      cluster.transitionToStandby(0);
-      LOG.info("**Transition to Active**");
-      cluster.transitionToActive(1);
-
-      // Verify that Standby-to-Active transition should set drop SPS flag to
-      // true. This will ensure that DNA_DROP_SPS_WORK_COMMAND will be
-      // propagated to datanode during heartbeat response.
-      int retries = 20;
-      boolean dropSPSWork = false;
-      while (retries > 0) {
-        for (DatanodeDescriptor dnd : listOfDns) {
-          dropSPSWork = dnd.shouldDropSPSWork();
-          if (!dropSPSWork) {
-            retries--;
-            Thread.sleep(250);
-            break;
-          }
-        }
-        if (dropSPSWork) {
-          break;
-        }
-      }
-      Assert.assertTrue("Didn't drop SPS work", dropSPSWork);
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Test to verify that SPS work will be dropped once the datanode is marked as
-   * expired. Internally 'dropSPSWork' flag is set as true while expiration and
-   * at the time of reconnection, will send DNA_DROP_SPS_WORK_COMMAND to that
-   * datanode.
-   */
-  @Test(timeout = 90000)
-  public void testDeadDatanode() throws Exception {
-    int heartbeatExpireInterval = 2 * 2000;
-    config.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
-        3000);
-    config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000L);
-    createCluster();
-
-    DataNode dn = cluster.getDataNodes().get(0);
-    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
-
-    FSNamesystem fsn = cluster.getNamesystem(0);
-    DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn,
-        dn.getDatanodeId());
-    boolean isDead = false;
-    int retries = 20;
-    while (retries > 0) {
-      isDead = dnd.getLastUpdateMonotonic() < (monotonicNow()
-          - heartbeatExpireInterval);
-      if (isDead) {
-        break;
-      }
-      retries--;
-      Thread.sleep(250);
-    }
-    Assert.assertTrue("Datanode is alive", isDead);
-    // Disable datanode heartbeat, so that the datanode will get expired after
-    // the recheck interval and become dead.
-    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
-
-    // Verify that datanode expiration will set drop SPS flag to
-    // true. This will ensure that DNA_DROP_SPS_WORK_COMMAND will be
-    // propagated to datanode during reconnection.
-    boolean dropSPSWork = false;
-    retries = 50;
-    while (retries > 0) {
-      dropSPSWork = dnd.shouldDropSPSWork();
-      if (dropSPSWork) {
-        break;
-      }
-      retries--;
-      Thread.sleep(100);
-    }
-    Assert.assertTrue("Didn't drop SPS work", dropSPSWork);
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
index f85769f..f48521b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.StorageTypeNodePair;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -52,7 +53,7 @@
   @Before
   public void setup() throws Exception {
     Configuration config = new HdfsConfiguration();
-    Context ctxt = Mockito.mock(IntraSPSNameNodeContext.class);
+    Context ctxt = Mockito.mock(ExternalSPSContext.class);
     SPSService sps = new StoragePolicySatisfier(config);
     Mockito.when(ctxt.isRunning()).thenReturn(true);
     Mockito.when(ctxt.isInSafeMode()).thenReturn(false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
deleted file mode 100644
index ec5307b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1825 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
-import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
- * moved and finding its suggested target locations to move.
- */
-public class TestStoragePolicySatisfier {
-
-  {
-    GenericTestUtils.setLogLevel(
-        getLogger(FSTreeTraverser.class), Level.DEBUG);
-  }
-
-  private static final String ONE_SSD = "ONE_SSD";
-  private static final String COLD = "COLD";
-  protected static final Logger LOG =
-      LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
-  private Configuration config = null;
-  private StorageType[][] allDiskTypes =
-      new StorageType[][]{{StorageType.DISK, StorageType.DISK},
-          {StorageType.DISK, StorageType.DISK},
-          {StorageType.DISK, StorageType.DISK}};
-  private MiniDFSCluster hdfsCluster = null;
-  private DistributedFileSystem dfs = null;
-  public static final int NUM_OF_DATANODES = 3;
-  public static final int STORAGES_PER_DATANODE = 2;
-  public static final long CAPACITY = 2 * 256 * 1024 * 1024;
-  public static final String FILE = "/testMoveToSatisfyStoragePolicy";
-  public static final int DEFAULT_BLOCK_SIZE = 1024;
-
-  /**
-   * Sets hdfs cluster.
-   */
-  public void setCluster(MiniDFSCluster cluster) {
-    this.hdfsCluster = cluster;
-  }
-
-  /**
-   * @return conf.
-   */
-  public Configuration getConf() {
-    return this.config;
-  }
-
-  /**
-   * @return hdfs cluster.
-   */
-  public MiniDFSCluster getCluster() {
-    return hdfsCluster;
-  }
-
-  /**
-   * Gets distributed file system.
-   *
-   * @throws IOException
-   */
-  public DistributedFileSystem getFS() throws IOException {
-    this.dfs = hdfsCluster.getFileSystem();
-    return this.dfs;
-  }
-
-  @After
-  public void shutdownCluster() {
-    if (hdfsCluster != null) {
-      hdfsCluster.shutdown();
-    }
-  }
-
-  public void createCluster() throws IOException {
-    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
-        STORAGES_PER_DATANODE, CAPACITY);
-    getFS();
-    writeContent(FILE);
-  }
-
-  @Before
-  public void setUp() {
-    config = new HdfsConfiguration();
-    config.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
-    // Most of the tests are restarting DNs and NN. So, reduced refresh cycle to
-    // update latest datanodes.
-    config.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
-        3000);
-  }
-
-  @Test(timeout = 300000)
-  public void testWhenStoragePolicySetToCOLD()
-      throws Exception {
-
-    try {
-      createCluster();
-      doTestWhenStoragePolicySetToCOLD();
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  private void doTestWhenStoragePolicySetToCOLD() throws Exception {
-    // Change policy to COLD
-    dfs.setStoragePolicy(new Path(FILE), COLD);
-
-    StorageType[][] newtypes =
-        new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
-            {StorageType.ARCHIVE, StorageType.ARCHIVE},
-            {StorageType.ARCHIVE, StorageType.ARCHIVE}};
-    startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
-        STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-    hdfsCluster.triggerHeartbeats();
-    dfs.satisfyStoragePolicy(new Path(FILE));
-    // Wait till namenode notified about the block location details
-    DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 3, 35000,
-        dfs);
-  }
-
-  @Test(timeout = 300000)
-  public void testWhenStoragePolicySetToALLSSD()
-      throws Exception {
-    try {
-      createCluster();
-      // Change policy to ALL_SSD
-      dfs.setStoragePolicy(new Path(FILE), "ALL_SSD");
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.SSD, StorageType.DISK},
-              {StorageType.SSD, StorageType.DISK},
-              {StorageType.SSD, StorageType.DISK}};
-
-      // Making sure SDD based nodes added to cluster. Adding SSD based
-      // datanodes.
-      startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-      // Wait till StorgePolicySatisfier Identified that block to move to SSD
-      // areas
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 3, 30000, dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  @Test(timeout = 300000)
-  public void testWhenStoragePolicySetToONESSD()
-      throws Exception {
-    try {
-      createCluster();
-      // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
-
-      // Making sure SDD based nodes added to cluster. Adding SSD based
-      // datanodes.
-      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-      // Wait till StorgePolicySatisfier Identified that block to move to SSD
-      // areas
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 1, 30000, dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
-          dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that the block storage movement report will be propagated
-   * to Namenode via datanode heartbeat.
-   */
-  @Test(timeout = 300000)
-  public void testBlksStorageMovementAttemptFinishedReport() throws Exception {
-    try {
-      createCluster();
-      // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
-
-      // Making sure SDD based nodes added to cluster. Adding SSD based
-      // datanodes.
-      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-
-      // Wait till the block is moved to SSD areas
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 1, 30000, dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
-          dfs);
-
-      waitForBlocksMovementAttemptReport(1, 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that multiple files are giving to satisfy storage policy
-   * and should work well altogether.
-   */
-  @Test(timeout = 300000)
-  public void testMultipleFilesForSatisfyStoragePolicy() throws Exception {
-    try {
-      createCluster();
-      List<String> files = new ArrayList<>();
-      files.add(FILE);
-
-      // Creates 4 more files. Send all of them for satisfying the storage
-      // policy together.
-      for (int i = 0; i < 4; i++) {
-        String file1 = "/testMoveWhenStoragePolicyNotSatisfying_" + i;
-        files.add(file1);
-        writeContent(file1);
-      }
-      // Change policy to ONE_SSD
-      for (String fileName : files) {
-        dfs.setStoragePolicy(new Path(fileName), ONE_SSD);
-        dfs.satisfyStoragePolicy(new Path(fileName));
-      }
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
-
-      // Making sure SDD based nodes added to cluster. Adding SSD based
-      // datanodes.
-      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-      hdfsCluster.triggerHeartbeats();
-
-      for (String fileName : files) {
-        // Wait till the block is moved to SSD areas
-        DFSTestUtil.waitExpectedStorageType(
-            fileName, StorageType.SSD, 1, 30000, dfs);
-        DFSTestUtil.waitExpectedStorageType(
-            fileName, StorageType.DISK, 2, 30000, dfs);
-      }
-
-      waitForBlocksMovementAttemptReport(files.size(), 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify hdfsAdmin.satisfyStoragePolicy works well for file.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testSatisfyFileWithHdfsAdmin() throws Exception {
-    try {
-      createCluster();
-      HdfsAdmin hdfsAdmin =
-          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(FILE), COLD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
-              {StorageType.DISK, StorageType.ARCHIVE},
-              {StorageType.DISK, StorageType.ARCHIVE}};
-      startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-      hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
-
-      hdfsCluster.triggerHeartbeats();
-      // Wait till namenode notified about the block location details
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 3, 30000,
-          dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify hdfsAdmin.satisfyStoragePolicy works well for dir.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testSatisfyDirWithHdfsAdmin() throws Exception {
-    try {
-      createCluster();
-      HdfsAdmin hdfsAdmin =
-          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
-      final String subDir = "/subDir";
-      final String subFile1 = subDir + "/subFile1";
-      final String subDir2 = subDir + "/subDir2";
-      final String subFile2 = subDir2 + "/subFile2";
-      dfs.mkdirs(new Path(subDir));
-      writeContent(subFile1);
-      dfs.mkdirs(new Path(subDir2));
-      writeContent(subFile2);
-
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(subDir), ONE_SSD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
-      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-      hdfsAdmin.satisfyStoragePolicy(new Path(subDir));
-
-      hdfsCluster.triggerHeartbeats();
-
-      // The policy takes effect for the file directly under the directory.
-      DFSTestUtil.waitExpectedStorageType(
-          subFile1, StorageType.SSD, 1, 30000, dfs);
-      DFSTestUtil.waitExpectedStorageType(
-          subFile1, StorageType.DISK, 2, 30000, dfs);
-
-      // The policy also takes effect for the file in the sub-directory.
-      DFSTestUtil.waitExpectedStorageType(
-          subFile2, StorageType.SSD, 1, 30000, dfs);
-      DFSTestUtil.waitExpectedStorageType(
-          subFile2, StorageType.DISK, 2, 30000, dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify hdfsAdmin.satisfyStoragePolicy exceptions.
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testSatisfyWithExceptions() throws Exception {
-    try {
-      createCluster();
-      final String nonExistingFile = "/nonExistingFile";
-      hdfsCluster.getConfiguration(0).
-          setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
-      restartNamenode();
-      HdfsAdmin hdfsAdmin =
-          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
-
-      try {
-        hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
-        Assert.fail(String.format(
-            "Should failed to satisfy storage policy "
-                + "for %s since %s is set to false.",
-            FILE, DFS_STORAGE_POLICY_ENABLED_KEY));
-      } catch (IOException e) {
-        GenericTestUtils.assertExceptionContains(String.format(
-            "Failed to satisfy storage policy since %s is set to false.",
-            DFS_STORAGE_POLICY_ENABLED_KEY), e);
-      }
-
-      hdfsCluster.getConfiguration(0).
-          setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
-      restartNamenode();
-
-      hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(config), config);
-      try {
-        hdfsAdmin.satisfyStoragePolicy(new Path(nonExistingFile));
-        Assert.fail("Should throw FileNotFoundException for " +
-            nonExistingFile);
-      } catch (FileNotFoundException e) {
-
-      }
-
-      try {
-        hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
-        hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
-      } catch (Exception e) {
-        Assert.fail(String.format("Allow to invoke mutlipe times "
-            + "#satisfyStoragePolicy() api for a path %s , internally just "
-            + "skipping addtion to satisfy movement queue.", FILE));
-      }
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that for the given path, some of the blocks or block src
-   * locations (src nodes) under the given path will be scheduled for block
-   * movement.
-   *
-   * For example, there are two blocks for a file:
-   *
-   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
-   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
-   * Only one datanode is available with storage type ARCHIVE, say D.
-   *
-   * SPS will schedule block movement to the coordinator node with the details,
-   * blk_1[move A(DISK) -> D(ARCHIVE)], blk_2[move A(DISK) -> D(ARCHIVE)].
-   */
-  @Test(timeout = 300000)
-  public void testWhenOnlyFewTargetDatanodeAreAvailableToSatisfyStoragePolicy()
-      throws Exception {
-    try {
-      createCluster();
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(FILE), COLD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE}};
-
-      // Adding ARCHIVE based datanodes.
-      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-      // Wait till StoragePolicySatisfier has identified the block to move to
-      // the ARCHIVE area.
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 1, 30000,
-          dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
-          dfs);
-
-      waitForBlocksMovementAttemptReport(1, 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that for the given path, no blocks or block src
-   * locations (src nodes) under the given path will be scheduled for block
-   * movement as there is no available datanode with the required storage type.
-   *
-   * For example, there are two blocks for a file:
-   *
-   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
-   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
-   * No datanode is available with storage type ARCHIVE.
-   *
-   * SPS won't schedule any block movement for this path.
-   */
-  @Test(timeout = 300000)
-  public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
-      throws Exception {
-    try {
-      createCluster();
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(FILE), COLD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.DISK, StorageType.DISK}};
-      // Adding DISK based datanodes
-      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-
-      // No block movement will be scheduled as there is no target node
-      // available with the required storage type.
-      waitForAttemptedItems(1, 30000);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
-          dfs);
-      // Since there is no target node the item will get timed out and then
-      // re-attempted.
-      waitForAttemptedItems(1, 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that SPS should not start when a Mover instance
-   * is running.
-   */
-  @Test(timeout = 300000)
-  public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
-      throws Exception {
-    boolean running;
-    FSDataOutputStream out = null;
-    try {
-      createCluster();
-      // Stop SPS
-      hdfsCluster.getNameNode().reconfigureProperty(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-          StoragePolicySatisfierMode.NONE.toString());
-      running = hdfsCluster.getFileSystem()
-          .getClient().isInternalSatisfierRunning();
-      Assert.assertFalse("SPS should stopped as configured.", running);
-
-      // Simulate the case by creating MOVER_ID file
-      out = hdfsCluster.getFileSystem().create(
-          HdfsServerConstants.MOVER_ID_PATH);
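-      // Keeping this output stream open mimics a Mover instance that is
-      // still running and holding the MOVER_ID file.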
-
-      // Restart SPS
-      hdfsCluster.getNameNode().reconfigureProperty(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-          StoragePolicySatisfierMode.INTERNAL.toString());
-
-      running = hdfsCluster.getFileSystem()
-          .getClient().isInternalSatisfierRunning();
-      Assert.assertFalse("SPS should not be able to run as file "
-          + HdfsServerConstants.MOVER_ID_PATH + " is being hold.", running);
-
-      // Simulate that the Mover has exited
-      out.close();
-      out = null;
-      hdfsCluster.getFileSystem().delete(
-          HdfsServerConstants.MOVER_ID_PATH, true);
-
-      // Restart SPS again
-      hdfsCluster.getNameNode().reconfigureProperty(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-          StoragePolicySatisfierMode.INTERNAL.toString());
-      running = hdfsCluster.getFileSystem()
-          .getClient().isInternalSatisfierRunning();
-      Assert.assertTrue("SPS should be running as "
-          + "Mover already exited", running);
-
-      // Check functionality after SPS restart
-      doTestWhenStoragePolicySetToCOLD();
-    } catch (ReconfigurationException e) {
-      throw new IOException("Exception when reconfigure "
-          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, e);
-    } finally {
-      if (out != null) {
-        out.close();
-      }
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that SPS should be able to start when the Mover ID file
-   * is not being held by a Mover. This can be the case when the Mover exits
-   * ungracefully without deleting the ID file from HDFS.
-   */
-  @Test(timeout = 300000)
-  public void testWhenMoverExitsWithoutDeleteMoverIDFile()
-      throws IOException {
-    try {
-      createCluster();
-      // Simulate the case by creating MOVER_ID file
-      DFSTestUtil.createFile(hdfsCluster.getFileSystem(),
-          HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
-      restartNamenode();
-      boolean running = hdfsCluster.getFileSystem()
-          .getClient().isInternalSatisfierRunning();
-      Assert.assertTrue("SPS should be running as "
-          + "no Mover really running", running);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test to verify that the satisfy worker can't move pinned blocks. If the
-   * given block is pinned, it shouldn't be considered for retries.
-   */
-  @Test(timeout = 120000)
-  public void testMoveWithBlockPinning() throws Exception {
-    try{
-      config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
-      hdfsCluster = startCluster(config, allDiskTypes, 3, 2, CAPACITY);
-
-      hdfsCluster.waitActive();
-      dfs = hdfsCluster.getFileSystem();
-
-      // create a file with replication factor 3 and mark 2 pinned block
-      // locations.
-      final String file1 = createFileAndSimulateFavoredNodes(2);
-
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(file1), COLD);
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE},
-              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
-      // Adding ARCHIVE based datanodes
-      startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-      dfs.satisfyStoragePolicy(new Path(file1));
-      hdfsCluster.triggerHeartbeats();
-
-      // Only the replica on the unpinned datanode is expected to be scheduled
-      // for movement; the pinned replicas should be skipped.
-      waitForAttemptedItems(1, 30000);
-      waitForBlocksMovementAttemptReport(1, 30000);
-      DFSTestUtil.waitExpectedStorageType(
-          file1, StorageType.ARCHIVE, 1, 30000, dfs);
-      DFSTestUtil.waitExpectedStorageType(
-          file1, StorageType.DISK, 2, 30000, dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests to verify that for the given path, only a few of the blocks or
-   * block src locations (src nodes) under the given path will be scheduled
-   * for block movement.
-   *
-   * For example, there are two blocks for a file:
-   *
-   * File1 => two blocks and default storage policy(HOT).
-   * blk_1[locations=A(DISK),B(DISK),C(DISK),D(DISK),E(DISK)],
-   * blk_2[locations=A(DISK),B(DISK),C(DISK),D(DISK),E(DISK)].
-   *
-   * Now, set storage policy to COLD.
-   * Only two DNs are available with the expected storage type ARCHIVE,
-   * say A and E.
-   *
-   * SPS will schedule block movement to the coordinator node with the details,
-   * blk_1[move A(DISK) -> A(ARCHIVE), move E(DISK) -> E(ARCHIVE)],
-   * blk_2[move A(DISK) -> A(ARCHIVE), move E(DISK) -> E(ARCHIVE)].
-   */
-  @Test(timeout = 300000)
-  public void testWhenOnlyFewSourceNodesHaveMatchingTargetNodes()
-      throws Exception {
-    try {
-      int numOfDns = 5;
-      config.setLong("dfs.block.size", 1024);
-      allDiskTypes =
-          new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
-              {StorageType.DISK, StorageType.DISK},
-              {StorageType.DISK, StorageType.DISK},
-              {StorageType.DISK, StorageType.DISK},
-              {StorageType.DISK, StorageType.ARCHIVE}};
-      hdfsCluster = startCluster(config, allDiskTypes, numOfDns,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      writeContent(FILE, (short) 5);
-
-      // Change policy to COLD
-      dfs.setStoragePolicy(new Path(FILE), COLD);
-
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-      // Wait till StoragePolicySatisfier has identified the block to move to
-      // the ARCHIVE area.
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 2, 30000,
-          dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
-          dfs);
-
-      waitForBlocksMovementAttemptReport(1, 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests moving block storage within the same datanode. Let's say we
-   * have DN1[DISK,ARCHIVE], DN2[DISK, SSD], DN3[DISK,RAM_DISK]; when the
-   * storage policy is set to ONE_SSD and satisfyStoragePolicy is requested,
-   * then the block should move to DN2[SSD] successfully.
-   */
-  @Test(timeout = 300000)
-  public void testBlockMoveInSameDatanodeWithONESSD() throws Exception {
-    StorageType[][] diskTypes =
-        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
-            {StorageType.DISK, StorageType.SSD},
-            {StorageType.DISK, StorageType.RAM_DISK}};
-    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    try {
-      hdfsCluster = startCluster(config, diskTypes, NUM_OF_DATANODES,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      writeContent(FILE);
-
-      // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
-
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 1, 30000, dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
-          dfs);
-
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests moving block storage within the same datanode and a remote node.
-   * Let's say we have DN1[DISK,ARCHIVE], DN2[ARCHIVE, SSD], DN3[DISK,DISK],
-   * DN4[DISK,DISK]; when the storage policy is set to WARM and
-   * satisfyStoragePolicy is requested, then the block should move to
-   * DN1[ARCHIVE] and DN2[ARCHIVE] successfully.
-   */
-  @Test(timeout = 300000)
-  public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception {
-    StorageType[][] diskTypes =
-        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
-            {StorageType.ARCHIVE, StorageType.SSD},
-            {StorageType.DISK, StorageType.DISK},
-            {StorageType.DISK, StorageType.DISK}};
-
-    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    try {
-      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      writeContent(FILE);
-
-      // Change policy to WARM
-      dfs.setStoragePolicy(new Path(FILE), "WARM");
-      dfs.satisfyStoragePolicy(new Path(FILE));
-      hdfsCluster.triggerHeartbeats();
-
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 1, 30000,
-          dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 2, 30000,
-          dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * If a replica with the expected storage type already exists on the source
-   * DN, then that DN should be skipped.
-   */
-  @Test(timeout = 300000)
-  public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
-      throws Exception {
-    StorageType[][] diskTypes = new StorageType[][] {
-        {StorageType.DISK, StorageType.ARCHIVE},
-        {StorageType.DISK, StorageType.ARCHIVE},
-        {StorageType.DISK, StorageType.ARCHIVE}};
-
-    try {
-      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      // 1. Write two replicas on DISK
-      DFSTestUtil.createFile(dfs, new Path(FILE), DEFAULT_BLOCK_SIZE,
-          (short) 2, 0);
-      // 2. Change policy to COLD, so third replica will be written to ARCHIVE.
-      dfs.setStoragePolicy(new Path(FILE), "COLD");
-
-      // 3. Change the replication factor to 3.
-      dfs.setReplication(new Path(FILE), (short) 3);
-
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
-          dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 1, 30000,
-          dfs);
-
-      // 4. Change policy to HOT, so we can move all the blocks to DISK.
-      dfs.setStoragePolicy(new Path(FILE), "HOT");
-
-      // 5. Satisfy the policy.
-      dfs.satisfyStoragePolicy(new Path(FILE));
-
-      // 6. Blocks should move successfully.
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
-          dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests that movements should not be assigned when there is no space in the
-   * target DN.
-   */
-  @Test(timeout = 300000)
-  public void testChooseInSameDatanodeWithONESSDShouldNotChooseIfNoSpace()
-      throws Exception {
-    StorageType[][] diskTypes =
-        new StorageType[][]{{StorageType.DISK, StorageType.DISK},
-            {StorageType.DISK, StorageType.SSD},
-            {StorageType.DISK, StorageType.DISK}};
-    config.setLong("dfs.block.size", 2 * DEFAULT_BLOCK_SIZE);
-    long dnCapacity = 1024 * DEFAULT_BLOCK_SIZE + (2 * DEFAULT_BLOCK_SIZE - 1);
-    try {
-      hdfsCluster = startCluster(config, diskTypes, NUM_OF_DATANODES,
-          STORAGES_PER_DATANODE, dnCapacity);
-      dfs = hdfsCluster.getFileSystem();
-      writeContent(FILE);
-
-      // Change policy to ONE_SSD
-      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
-      Path filePath = new Path("/testChooseInSameDatanode");
-      final FSDataOutputStream out =
-          dfs.create(filePath, false, 100, (short) 1, 2 * DEFAULT_BLOCK_SIZE);
-      try {
-        dfs.setStoragePolicy(filePath, ONE_SSD);
-        // Try to fill up SSD part by writing content
-        long remaining = dfs.getStatus().getRemaining() / (3 * 2);
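-        // remaining/(3 DNs * 2 storages) approximates the free space of a
-        // single storage volume, so this loop roughly fills the lone SSD.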
-        for (int i = 0; i < remaining; i++) {
-          out.write(i);
-        }
-      } finally {
-        out.close();
-      }
-      hdfsCluster.triggerHeartbeats();
-      ArrayList<DataNode> dataNodes = hdfsCluster.getDataNodes();
-      // Temporarily disable heartbeats, so that we can assert whether any
-      // items get scheduled for DNs even though the DNs do not have space to
-      // write. Disabling heartbeats keeps scheduled items on the
-      // DatanodeDescriptor itself.
-      for (DataNode dataNode : dataNodes) {
-        DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, true);
-      }
-      dfs.satisfyStoragePolicy(new Path(FILE));
-
-      // Wait for items to be processed
-      waitForAttemptedItems(1, 30000);
-
-      // Make sure no items assigned for movements
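-      // (DatanodeDescriptor#getBlocksToMoveStorages returns null when no
-      // block move task has been queued on the DN).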
-      Set<DatanodeDescriptor> dns = hdfsCluster.getNamesystem()
-          .getBlockManager().getDatanodeManager().getDatanodes();
-      for (DatanodeDescriptor dd : dns) {
-        assertNull(dd.getBlocksToMoveStorages(1));
-      }
-
-      // Enable heartbeats again
-      for (DataNode dataNode : dataNodes) {
-        DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, false);
-      }
-      hdfsCluster.triggerHeartbeats();
-
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
-          dfs);
-      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 0, 30000, dfs);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Tests that the xattr should be cleaned up if satisfyStoragePolicy is
-   * called on an EC file with an unsuitable storage policy set.
-   *
-   * @throws Exception
-   */
-  @Test(timeout = 300000)
-  public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
-      throws Exception {
-    StorageType[][] diskTypes =
-        new StorageType[][]{{StorageType.SSD, StorageType.DISK},
-            {StorageType.SSD, StorageType.DISK},
-            {StorageType.SSD, StorageType.DISK},
-            {StorageType.SSD, StorageType.DISK},
-            {StorageType.SSD, StorageType.DISK},
-            {StorageType.DISK, StorageType.SSD},
-            {StorageType.DISK, StorageType.SSD},
-            {StorageType.DISK, StorageType.SSD},
-            {StorageType.DISK, StorageType.SSD},
-            {StorageType.DISK, StorageType.SSD}};
-
-    int defaultStripedBlockSize =
-        StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
-    config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
-    config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    config.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
-        1L);
-    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
-    try {
-      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      dfs.enableErasureCodingPolicy(
-          StripedFileTestUtil.getDefaultECPolicy().getName());
-
-      // set "/foo" directory with ONE_SSD storage policy.
-      ClientProtocol client = NameNodeProxies.createProxy(config,
-          hdfsCluster.getFileSystem(0).getUri(), ClientProtocol.class)
-          .getProxy();
-      String fooDir = "/foo";
-      client.mkdirs(fooDir, new FsPermission((short) 777), true);
-      // set an EC policy on "/foo" directory
-      client.setErasureCodingPolicy(fooDir,
-          StripedFileTestUtil.getDefaultECPolicy().getName());
-
-      // write file to fooDir
-      final String testFile = "/foo/bar";
-      long fileLen = 20 * defaultStripedBlockSize;
-      DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short) 3, 0);
-
-      // ONE_SSD is an unsuitable storage policy for EC files
-      client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
-      dfs.satisfyStoragePolicy(new Path(testFile));
-
-      // Thread.sleep(9000); // To make sure SPS triggered
-      // verify storage types and locations
-      LocatedBlocks locatedBlocks =
-          client.getBlockLocations(testFile, 0, fileLen);
-      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
-        for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.DISK, type);
-        }
-      }
-
-      // Make sure satisfy xattr has been removed.
-      DFSTestUtil.waitForXattrRemoved(testFile, XATTR_SATISFY_STORAGE_POLICY,
-          hdfsCluster.getNamesystem(), 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS with empty file.
-   * 1. Create one empty file.
-   * 2. Call satisfyStoragePolicy for empty file.
-   * 3. SPS should skip this file and xattr should not be added for empty file.
-   */
-  @Test(timeout = 300000)
-  public void testSPSWhenFileLengthIsZero() throws Exception {
-    try {
-      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
-          STORAGES_PER_DATANODE, CAPACITY);
-      hdfsCluster.waitActive();
-      DistributedFileSystem fs = hdfsCluster.getFileSystem();
-      Path filePath = new Path("/zeroSizeFile");
-      DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
-      FSEditLog editlog = hdfsCluster.getNameNode().getNamesystem()
-          .getEditLog();
-      long lastWrittenTxId = editlog.getLastWrittenTxId();
-      fs.satisfyStoragePolicy(filePath);
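-      // Since the file is empty, SPS should skip it without logging an xattr
-      // edit, so the last written transaction id must not advance.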
-      Assert.assertEquals("Xattr should not be added for the file",
-          lastWrittenTxId, editlog.getLastWrittenTxId());
-      INode inode = hdfsCluster.getNameNode().getNamesystem().getFSDirectory()
-          .getINode(filePath.toString());
-      Assert.assertTrue("XAttrFeature should be null for file",
-          inode.getXAttrFeature() == null);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for a file with low redundancy blocks.
-   * 1. Create a cluster with 3 datanodes.
-   * 2. Create one file with 3 replicas.
-   * 3. Set the policy and call satisfyStoragePolicy for the file.
-   * 4. Stop the NameNode and the datanodes.
-   * 5. Start the NameNode with 2 datanodes and wait for block movement.
-   * 6. Start the third datanode.
-   * 7. The third datanode's replica should also be moved to the proper
-   * storage based on the policy.
-   */
-  @Test(timeout = 300000)
-  public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-    try {
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
-          "3000");
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
-          "5000");
-      StorageType[][] newtypes = new StorageType[][] {
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK}};
-      hdfsCluster = startCluster(config, newtypes, 3, 2, CAPACITY);
-      hdfsCluster.waitActive();
-      DistributedFileSystem fs = hdfsCluster.getFileSystem();
-      Path filePath = new Path("/zeroSizeFile");
-      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0);
-      fs.setStoragePolicy(filePath, "COLD");
-      List<DataNodeProperties> list = new ArrayList<>();
-      list.add(hdfsCluster.stopDataNode(0));
-      list.add(hdfsCluster.stopDataNode(0));
-      list.add(hdfsCluster.stopDataNode(0));
-      restartNamenode();
-      hdfsCluster.restartDataNode(list.get(0), false);
-      hdfsCluster.restartDataNode(list.get(1), false);
-      hdfsCluster.waitActive();
-      fs.satisfyStoragePolicy(filePath);
-      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-          StorageType.ARCHIVE, 2, 30000, hdfsCluster.getFileSystem());
-      hdfsCluster.restartDataNode(list.get(2), false);
-      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-          StorageType.ARCHIVE, 3, 30000, hdfsCluster.getFileSystem());
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for a file with excess redundancy blocks.
-   * 1. Create a cluster with 5 datanodes.
-   * 2. Create one file with 5 replicas.
-   * 3. Set the file replication to 3.
-   * 4. Set the policy and call satisfyStoragePolicy for the file.
-   * 5. Blocks should be moved successfully.
-   */
-  @Test(timeout = 300000)
-  public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception {
-    try {
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
-          "3000");
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
-          "5000");
-      StorageType[][] newtypes = new StorageType[][] {
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK}};
-      hdfsCluster = startCluster(config, newtypes, 5, 2, CAPACITY);
-      hdfsCluster.waitActive();
-      DistributedFileSystem fs = hdfsCluster.getFileSystem();
-      Path filePath = new Path("/zeroSizeFile");
-      DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
-      fs.setReplication(filePath, (short) 3);
-      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
-          LogFactory.getLog(BlockStorageMovementAttemptedItems.class));
-      fs.setStoragePolicy(filePath, "COLD");
-      fs.satisfyStoragePolicy(filePath);
-      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-          StorageType.ARCHIVE, 3, 60000, hdfsCluster.getFileSystem());
-      assertFalse("Log output does not contain expected log message: ",
-          logs.getOutput().contains("some of the blocks are low redundant"));
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for an empty directory; the xattr should be removed.
-   */
-  @Test(timeout = 300000)
-  public void testSPSForEmptyDirectory() throws IOException, TimeoutException,
-      InterruptedException {
-    try {
-      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
-          STORAGES_PER_DATANODE, CAPACITY);
-      hdfsCluster.waitActive();
-      DistributedFileSystem fs = hdfsCluster.getFileSystem();
-      Path emptyDir = new Path("/emptyDir");
-      fs.mkdirs(emptyDir);
-      fs.satisfyStoragePolicy(emptyDir);
-      // Make sure satisfy xattr has been removed.
-      DFSTestUtil.waitForXattrRemoved("/emptyDir",
-          XATTR_SATISFY_STORAGE_POLICY, hdfsCluster.getNamesystem(), 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for a non-existent directory.
-   */
-  @Test(timeout = 300000)
-  public void testSPSForNonExistDirectory() throws Exception {
-    try {
-      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
-          STORAGES_PER_DATANODE, CAPACITY);
-      hdfsCluster.waitActive();
-      DistributedFileSystem fs = hdfsCluster.getFileSystem();
-      Path emptyDir = new Path("/emptyDir");
-      try {
-        fs.satisfyStoragePolicy(emptyDir);
-        fail("FileNotFoundException should throw");
-      } catch (FileNotFoundException e) {
-        // nothing to do
-      }
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for a directory tree which doesn't have any files.
-   */
-  @Test(timeout = 300000)
-  public void testSPSWithDirectoryTreeWithoutFile() throws Exception {
-    try {
-      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
-          STORAGES_PER_DATANODE, CAPACITY);
-      hdfsCluster.waitActive();
-      // Create directories
-      /*
-       *                   root
-       *                    |
-       *           A--------C--------D
-       *                    |
-       *               G----H----I
-       *                    |
-       *                    O
-       */
-      DistributedFileSystem fs = hdfsCluster.getFileSystem();
-      fs.mkdirs(new Path("/root/C/H/O"));
-      fs.mkdirs(new Path("/root/A"));
-      fs.mkdirs(new Path("/root/D"));
-      fs.mkdirs(new Path("/root/C/G"));
-      fs.mkdirs(new Path("/root/C/I"));
-      fs.satisfyStoragePolicy(new Path("/root"));
-      // Make sure satisfy xattr has been removed.
-      DFSTestUtil.waitForXattrRemoved("/root",
-          XATTR_SATISFY_STORAGE_POLICY, hdfsCluster.getNamesystem(), 30000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for a directory which has multilevel sub-directories.
-   */
-  @Test(timeout = 300000)
-  public void testMultipleLevelDirectoryForSatisfyStoragePolicy()
-      throws Exception {
-    try {
-      StorageType[][] diskTypes = new StorageType[][] {
-          {StorageType.DISK, StorageType.ARCHIVE},
-          {StorageType.ARCHIVE, StorageType.SSD},
-          {StorageType.DISK, StorageType.DISK}};
-      config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      createDirectoryTree(dfs);
-
-      List<String> files = getDFSListOfTree();
-      dfs.setStoragePolicy(new Path("/root"), COLD);
-      dfs.satisfyStoragePolicy(new Path("/root"));
-      for (String fileName : files) {
-        // Wait till the block is moved to ARCHIVE
-        DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
-            30000, dfs);
-      }
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  /**
-   * Test SPS for batch processing.
-   */
-  @Test(timeout = 3000000)
-  public void testBatchProcessingForSPSDirectory() throws Exception {
-    try {
-      StorageType[][] diskTypes = new StorageType[][] {
-          {StorageType.DISK, StorageType.ARCHIVE},
-          {StorageType.ARCHIVE, StorageType.SSD},
-          {StorageType.DISK, StorageType.DISK}};
-      config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-      // Set queue max capacity
-      config.setInt(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY,
-          5);
-      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-          STORAGES_PER_DATANODE, CAPACITY);
-      dfs = hdfsCluster.getFileSystem();
-      createDirectoryTree(dfs);
-      List<String> files = getDFSListOfTree();
-      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory
-          .getLog(FSTreeTraverser.class));
-
-      dfs.setStoragePolicy(new Path("/root"), COLD);
-      dfs.satisfyStoragePolicy(new Path("/root"));
-      for (String fileName : files) {
-        // Wait till the block is moved to ARCHIVE
-        DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
-            30000, dfs);
-      }
-      waitForBlocksMovementAttemptReport(files.size(), 30000);
-      String expectedLogMessage = "StorageMovementNeeded queue remaining"
-          + " capacity is zero";
-      assertTrue("Log output does not contain expected log message: "
-          + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-
-  /**
-   *  Test traversal when the parent gets deleted.
-   *  1. Delete /root/D/L when traversing Q.
-   *  2. U, R, S should not be queued.
-   */
-  @Test(timeout = 300000)
-  public void testTraverseWhenParentDeleted() throws Exception {
-    StorageType[][] diskTypes = new StorageType[][] {
-        {StorageType.DISK, StorageType.ARCHIVE},
-        {StorageType.ARCHIVE, StorageType.SSD},
-        {StorageType.DISK, StorageType.DISK}};
-    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
-    hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-        STORAGES_PER_DATANODE, CAPACITY);
-    dfs = hdfsCluster.getFileSystem();
-    createDirectoryTree(dfs);
-
-    List<String> expectedTraverseOrder = getDFSListOfTree();
-
-    // Remove files which will not be traversed when the parent is deleted
-    expectedTraverseOrder.remove("/root/D/L/R");
-    expectedTraverseOrder.remove("/root/D/L/S");
-    expectedTraverseOrder.remove("/root/D/L/Q/U");
-    FSDirectory fsDir = hdfsCluster.getNamesystem().getFSDirectory();
-
-    // The queue limit makes the traversal logic wait for a free entry in the
-    // queue. After 10 files, the traversal will be waiting at U.
-    StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
-    Context ctxt = new IntraSPSNameNodeContext(
-        hdfsCluster.getNamesystem(),
-        hdfsCluster.getNamesystem().getBlockManager(), sps) {
-      @Override
-      public boolean isInSafeMode() {
-        return false;
-      }
-
-      @Override
-      public boolean isRunning() {
-        return true;
-      }
-    };
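-    // The overridden context reports an active, running satisfier so that
-    // path ids can be queued and traversed without starting the real SPS
-    // service.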
-
-    sps.init(ctxt);
-    sps.getStorageMovementQueue().activate();
-
-    INode rootINode = fsDir.getINode("/root");
-    hdfsCluster.getNamesystem().getBlockManager().getSPSManager()
-        .addPathId(rootINode.getId());
-
-    //Wait for thread to reach U.
-    Thread.sleep(1000);
-    dfs.delete(new Path("/root/D/L"), true);
-
-
-    assertTraversal(expectedTraverseOrder, fsDir, sps);
-    dfs.delete(new Path("/root"), true);
-  }
-
-  /**
-   *  Test traversal when the root parent gets deleted.
-   *  1. Delete L when traversing Q.
-   *  2. E, M, U, R, S should not be queued.
-   */
-  @Test(timeout = 300000)
-  public void testTraverseWhenRootParentDeleted() throws Exception {
-    StorageType[][] diskTypes = new StorageType[][] {
-        {StorageType.DISK, StorageType.ARCHIVE},
-        {StorageType.ARCHIVE, StorageType.SSD},
-        {StorageType.DISK, StorageType.DISK}};
-    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-    config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
-    hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
-        STORAGES_PER_DATANODE, CAPACITY);
-    dfs = hdfsCluster.getFileSystem();
-    createDirectoryTree(dfs);
-
-    List<String> expectedTraverseOrder = getDFSListOfTree();
-
-    // Remove files which will not be traversed when the parent is deleted
-    expectedTraverseOrder.remove("/root/D/L/R");
-    expectedTraverseOrder.remove("/root/D/L/S");
-    expectedTraverseOrder.remove("/root/D/L/Q/U");
-    expectedTraverseOrder.remove("/root/D/M");
-    expectedTraverseOrder.remove("/root/E");
-    FSDirectory fsDir = hdfsCluster.getNamesystem().getFSDirectory();
-
-    // The queue limit makes the traversal logic wait for a free entry in the
-    // queue. After 10 files, the traversal will be waiting at U.
-    StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
-    Context ctxt = new IntraSPSNameNodeContext(
-        hdfsCluster.getNamesystem(),
-        hdfsCluster.getNamesystem().getBlockManager(), sps) {
-      @Override
-      public boolean isInSafeMode() {
-        return false;
-      }
-
-      @Override
-      public boolean isRunning() {
-        return true;
-      }
-    };
-    sps.init(ctxt);
-    sps.getStorageMovementQueue().activate();
-
-    INode rootINode = fsDir.getINode("/root");
-    hdfsCluster.getNamesystem().getBlockManager().getSPSManager()
-        .addPathId(rootINode.getId());
-
-    // Wait for thread to reach U.
-    Thread.sleep(1000);
-
-    dfs.delete(new Path("/root/D/L"), true);
-
-    assertTraversal(expectedTraverseOrder, fsDir, sps);
-    dfs.delete(new Path("/root"), true);
-  }
-
-  private void assertTraversal(List<String> expectedTraverseOrder,
-      FSDirectory fsDir, StoragePolicySatisfier sps)
-          throws InterruptedException {
-    // Remove 10 elements to free the queue, so further traversal will start.
-    for (int i = 0; i < 10; i++) {
-      String path = expectedTraverseOrder.remove(0);
-      ItemInfo itemInfo = sps.getStorageMovementQueue().get();
-      if (itemInfo == null) {
-        continue;
-      }
-      Long trackId = itemInfo.getFile();
-      INode inode = fsDir.getInode(trackId);
-      assertTrue("Failed to traverse tree, expected " + path + " but got "
-          + inode.getFullPathName(), path.equals(inode.getFullPathName()));
-    }
-    // Wait to finish tree traverse
-    Thread.sleep(5000);
-
-    // Check that the other elements are traversed in order, and that E, M, U,
-    // R and S, which we already removed from the expected list, are not added
-    // to the queue.
-    for (String path : expectedTraverseOrder) {
-      ItemInfo itemInfo = sps.getStorageMovementQueue().get();
-      if (itemInfo == null) {
-        continue;
-      }
-      Long trackId = itemInfo.getFile();
-      INode inode = fsDir.getInode(trackId);
-      assertTrue("Failed to traverse tree, expected " + path + " but got "
-          + inode.getFullPathName(), path.equals(inode.getFullPathName()));
-    }
-  }
-
-  /**
-   * Test storage block moves while under-replication block tasks exist in the
-   * system, so both will share the max transfer streams.
-   *
-   * 1. Create a cluster with 2 datanodes.
-   * 2. Create 20 files with 2 replicas.
-   * 3. Start 2 more DNs with DISK & SSD types.
-   * 4. Set the replication factor of the first 10 files to 4 to trigger
-   * replication tasks.
-   * 5. Set the ALL_SSD policy on the second set of files, 11-20.
-   * 6. Call SPS for files 11-20 to trigger block move tasks to the new DNs.
-   * 7. Wait for the under-replication and SPS tasks to complete.
-   */
-  @Test(timeout = 300000)
-  public void testMoveBlocksWithUnderReplicatedBlocks() throws Exception {
-    try {
-      config.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 3);
-      config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
-          "3000");
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
-          "5000");
-      config.setBoolean(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_KEY,
-          false);
-
-      StorageType[][] storagetypes = new StorageType[][] {
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK}};
-
-      hdfsCluster = startCluster(config, storagetypes, 2, 2, CAPACITY);
-      hdfsCluster.waitActive();
-      dfs = hdfsCluster.getFileSystem();
-
-      // Below files will be used for pending replication block tasks.
-      for (int i=1; i<=20; i++){
-        Path filePath = new Path("/file" + i);
-        DFSTestUtil.createFile(dfs, filePath, DEFAULT_BLOCK_SIZE * 5, (short) 2,
-            0);
-      }
-
-      StorageType[][] newtypes =
-          new StorageType[][]{{StorageType.DISK, StorageType.SSD},
-              {StorageType.DISK, StorageType.SSD}};
-      startAdditionalDNs(config, 2, NUM_OF_DATANODES, newtypes,
-          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
-
-      // increase replication factor to 4 for the first 10 files and thus
-      // initiate replica tasks
-      for (int i=1; i<=10; i++){
-        Path filePath = new Path("/file" + i);
-        dfs.setReplication(filePath, (short) 4);
-      }
-
-      // invoke SPS for 11-20 files
-      for (int i = 11; i <= 20; i++) {
-        Path filePath = new Path("/file" + i);
-        dfs.setStoragePolicy(filePath, "ALL_SSD");
-        dfs.satisfyStoragePolicy(filePath);
-      }
-
-      for (int i = 1; i <= 10; i++) {
-        Path filePath = new Path("/file" + i);
-        DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-            StorageType.DISK, 4, 60000, hdfsCluster.getFileSystem());
-      }
-      for (int i = 11; i <= 20; i++) {
-        Path filePath = new Path("/file" + i);
-        DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-            StorageType.SSD, 2, 30000, hdfsCluster.getFileSystem());
-      }
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  @Test(timeout = 300000)
-  public void testStoragePolicySatisfyPathStatus() throws Exception {
-    try {
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
-          "3000");
-      config.setBoolean(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_KEY,
-          false);
-
-      StorageType[][] storagetypes = new StorageType[][] {
-          {StorageType.ARCHIVE, StorageType.DISK},
-          {StorageType.ARCHIVE, StorageType.DISK}};
-      hdfsCluster = startCluster(config, storagetypes, 2, 2, CAPACITY);
-      hdfsCluster.waitActive();
-      // BlockStorageMovementNeeded.setStatusClearanceElapsedTimeMs(200000);
-      dfs = hdfsCluster.getFileSystem();
-      Path filePath = new Path("/file");
-      DFSTestUtil.createFile(dfs, filePath, 1024, (short) 2,
-            0);
-      dfs.setStoragePolicy(filePath, "COLD");
-      dfs.satisfyStoragePolicy(filePath);
-      Thread.sleep(3000);
-      StoragePolicySatisfyPathStatus status = dfs.getClient()
-          .checkStoragePolicySatisfyPathStatus(filePath.toString());
-      Assert.assertTrue(
-          "Status should be IN_PROGRESS/SUCCESS, but status is " + status,
-          StoragePolicySatisfyPathStatus.IN_PROGRESS.equals(status)
-              || StoragePolicySatisfyPathStatus.SUCCESS.equals(status));
-      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-          StorageType.ARCHIVE, 2, 30000, dfs);
-
-      // wait till status is SUCCESS
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          try {
-            StoragePolicySatisfyPathStatus status = dfs.getClient()
-                .checkStoragePolicySatisfyPathStatus(filePath.toString());
-            return StoragePolicySatisfyPathStatus.SUCCESS.equals(status);
-          } catch (IOException e) {
-            Assert.fail("Fail to get path status for sps");
-          }
-          return false;
-        }
-      }, 100, 60000);
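-      // Shorten the status clearance interval so the SUCCESS entry expires
-      // quickly and the reported status becomes NOT_AVAILABLE.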
-      BlockStorageMovementNeeded.setStatusClearanceElapsedTimeMs(1000);
-      // wait till status is NOT_AVAILABLE
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          try {
-            StoragePolicySatisfyPathStatus status = dfs.getClient()
-                .checkStoragePolicySatisfyPathStatus(filePath.toString());
-            return StoragePolicySatisfyPathStatus.NOT_AVAILABLE.equals(status);
-          } catch (IOException e) {
-            Assert.fail("Fail to get path status for sps");
-          }
-          return false;
-        }
-      }, 100, 60000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  @Test(timeout = 300000)
-  public void testMaxRetryForFailedBlock() throws Exception {
-    try {
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
-          "1000");
-      config.set(DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
-          "1000");
-      StorageType[][] storagetypes = new StorageType[][] {
-          {StorageType.DISK, StorageType.DISK},
-          {StorageType.DISK, StorageType.DISK}};
-      hdfsCluster = startCluster(config, storagetypes, 2, 2, CAPACITY);
-      hdfsCluster.waitActive();
-      dfs = hdfsCluster.getFileSystem();
-
-      Path filePath = new Path("/retryFile");
-      DFSTestUtil.createFile(dfs, filePath, DEFAULT_BLOCK_SIZE, (short) 2,
-          0);
-
-      dfs.setStoragePolicy(filePath, "COLD");
-      dfs.satisfyStoragePolicy(filePath);
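-      // Give the satisfier enough time to exhaust all retry attempts
-      // (self retry timeout x max retry attempts) before checking the status.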
-      Thread.sleep(3000
-          * DFSConfigKeys
-          .DFS_STORAGE_POLICY_SATISFIER_MAX_RETRY_ATTEMPTS_DEFAULT);
-      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
-          StorageType.DISK, 2, 60000, hdfsCluster.getFileSystem());
-      // Path status should be FAILURE
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          try {
-            StoragePolicySatisfyPathStatus status = dfs.getClient()
-                .checkStoragePolicySatisfyPathStatus(filePath.toString());
-            return StoragePolicySatisfyPathStatus.FAILURE.equals(status);
-          } catch (IOException e) {
-            Assert.fail("Fail to get path status for sps");
-          }
-          return false;
-        }
-      }, 100, 90000);
-    } finally {
-      shutdownCluster();
-    }
-  }
-
-  private static void createDirectoryTree(DistributedFileSystem dfs)
-      throws Exception {
-    // tree structure
-    /*
-     *                           root
-     *                             |
-     *           A--------B--------C--------D--------E
-     *                    |                 |
-     *          F----G----H----I       J----K----L----M
-     *               |                           |
-     *          N----O----P                 Q----R----S
-     *                    |                 |
-     *                    T                 U
-     */
-    // create root Node and child
-    dfs.mkdirs(new Path("/root"));
-    DFSTestUtil.createFile(dfs, new Path("/root/A"), 1024, (short) 3, 0);
-    dfs.mkdirs(new Path("/root/B"));
-    DFSTestUtil.createFile(dfs, new Path("/root/C"), 1024, (short) 3, 0);
-    dfs.mkdirs(new Path("/root/D"));
-    DFSTestUtil.createFile(dfs, new Path("/root/E"), 1024, (short) 3, 0);
-
-    // Create /root/B child
-    DFSTestUtil.createFile(dfs, new Path("/root/B/F"), 1024, (short) 3, 0);
-    dfs.mkdirs(new Path("/root/B/G"));
-    DFSTestUtil.createFile(dfs, new Path("/root/B/H"), 1024, (short) 3, 0);
-    DFSTestUtil.createFile(dfs, new Path("/root/B/I"), 1024, (short) 3, 0);
-
-    // Create /root/D child
-    DFSTestUtil.createFile(dfs, new Path("/root/D/J"), 1024, (short) 3, 0);
-    DFSTestUtil.createFile(dfs, new Path("/root/D/K"), 1024, (short) 3, 0);
-    dfs.mkdirs(new Path("/root/D/L"));
-    DFSTestUtil.createFile(dfs, new Path("/root/D/M"), 1024, (short) 3, 0);
-
-    // Create /root/B/G child
-    DFSTestUtil.createFile(dfs, new Path("/root/B/G/N"), 1024, (short) 3, 0);
-    DFSTestUtil.createFile(dfs, new Path("/root/B/G/O"), 1024, (short) 3, 0);
-    dfs.mkdirs(new Path("/root/B/G/P"));
-
-    // Create /root/D/L child
-    dfs.mkdirs(new Path("/root/D/L/Q"));
-    DFSTestUtil.createFile(dfs, new Path("/root/D/L/R"), 1024, (short) 3, 0);
-    DFSTestUtil.createFile(dfs, new Path("/root/D/L/S"), 1024, (short) 3, 0);
-
-    // Create /root/B/G/P child
-    DFSTestUtil.createFile(dfs, new Path("/root/B/G/P/T"), 1024, (short) 3, 0);
-
-    // Create /root/D/L/Q child
-    DFSTestUtil.createFile(dfs, new Path("/root/D/L/Q/U"), 1024, (short) 3, 0);
-  }
-
-  private List<String> getDFSListOfTree() {
-    List<String> dfsList = new ArrayList<>();
-    dfsList.add("/root/A");
-    dfsList.add("/root/B/F");
-    dfsList.add("/root/B/G/N");
-    dfsList.add("/root/B/G/O");
-    dfsList.add("/root/B/G/P/T");
-    dfsList.add("/root/B/H");
-    dfsList.add("/root/B/I");
-    dfsList.add("/root/C");
-    dfsList.add("/root/D/J");
-    dfsList.add("/root/D/K");
-    dfsList.add("/root/D/L/Q/U");
-    dfsList.add("/root/D/L/R");
-    dfsList.add("/root/D/L/S");
-    dfsList.add("/root/D/M");
-    dfsList.add("/root/E");
-    return dfsList;
-  }
-
-  private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
-      throws IOException {
-    ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
-    final String file1 = "/testMoveWithBlockPinning";
-    // replication factor 3
-    InetSocketAddress[] favoredNodes = new InetSocketAddress[favoredNodesCount];
-    for (int i = 0; i < favoredNodesCount; i++) {
-      favoredNodes[i] = dns.get(i).getXferAddress();
-    }
-    DFSTestUtil.createFile(dfs, new Path(file1), false, 1024, 100,
-        DEFAULT_BLOCK_SIZE, (short) 3, 0, false, favoredNodes);
-
-    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
-    Assert.assertEquals("Wrong block count", 1,
-        locatedBlocks.locatedBlockCount());
-
-    // verify storage type before movement
-    LocatedBlock lb = locatedBlocks.get(0);
-    StorageType[] storageTypes = lb.getStorageTypes();
-    for (StorageType storageType : storageTypes) {
-      Assert.assertTrue(StorageType.DISK == storageType);
-    }
-
-    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
-    DatanodeInfo[] locations = lb.getLocations();
-    Assert.assertEquals(3, locations.length);
-    Assert.assertTrue(favoredNodesCount < locations.length);
-    for (DatanodeInfo dnInfo : locations) {
-      LOG.info("Simulate block pinning in datanode {}", dnInfo);
-      DataNode dn = hdfsCluster.getDataNode(dnInfo.getIpcPort());
-      InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
-      favoredNodesCount--;
-      if (favoredNodesCount <= 0) {
-        break; // marked the requested number of pinned block locations
-      }
-    }
-    return file1;
-  }
-
-  public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
-      int timeout) throws TimeoutException, InterruptedException {
-    BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
-    final StoragePolicySatisfier sps =
-        (StoragePolicySatisfier) blockManager.getSPSManager()
-        .getInternalSPSService();
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
-            expectedBlkMovAttemptedCount,
-            ((BlockStorageMovementAttemptedItems) (sps
-                .getAttemptedItemsMonitor())).getAttemptedItemsCount());
-        return ((BlockStorageMovementAttemptedItems) (sps
-            .getAttemptedItemsMonitor()))
-            .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
-      }
-    }, 100, timeout);
-  }
-
-  public void waitForBlocksMovementAttemptReport(
-      long expectedMovementFinishedBlocksCount, int timeout)
-          throws TimeoutException, InterruptedException {
-    BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
-    final StoragePolicySatisfier sps =
-        (StoragePolicySatisfier) blockManager.getSPSManager()
-        .getInternalSPSService();
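-    // The attempted items counter is used here as a proxy for the number of
-    // reported movement attempts.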
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        int actualCount = ((BlockStorageMovementAttemptedItems) (sps
-            .getAttemptedItemsMonitor())).getAttemptedItemsCount();
-        LOG.info("MovementFinishedBlocks: expectedCount={} actualCount={}",
-            expectedMovementFinishedBlocksCount, actualCount);
-        return actualCount
-            >= expectedMovementFinishedBlocksCount;
-      }
-    }, 100, timeout);
-  }
-
-  public void writeContent(final String fileName) throws IOException {
-    writeContent(fileName, (short) 3);
-  }
-
-  private void writeContent(final String fileName, short replicationFactor)
-      throws IOException {
-    // write to DISK
-    final FSDataOutputStream out = dfs.create(new Path(fileName),
-        replicationFactor);
-    for (int i = 0; i < 1024; i++) {
-      out.write(i);
-    }
-    out.close();
-  }
-
-  private void startAdditionalDNs(final Configuration conf,
-      int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
-      int storagesPerDn, long nodeCapacity, final MiniDFSCluster cluster)
-          throws IOException {
-    long[][] capacities;
-    existingNodesNum += newNodesRequired;
-    capacities = new long[newNodesRequired][storagesPerDn];
-    for (int i = 0; i < newNodesRequired; i++) {
-      for (int j = 0; j < storagesPerDn; j++) {
-        capacities[i][j] = nodeCapacity;
-      }
-    }
-
-    cluster.startDataNodes(conf, newNodesRequired, newTypes, true, null, null,
-        null, capacities, null, false, false, false, null);
-    cluster.triggerHeartbeats();
-  }
-
-  public MiniDFSCluster startCluster(final Configuration conf,
-      StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn,
-      long nodeCapacity) throws IOException {
-    long[][] capacities = new long[numberOfDatanodes][storagesPerDn];
-    for (int i = 0; i < numberOfDatanodes; i++) {
-      for (int j = 0; j < storagesPerDn; j++) {
-        capacities[i][j] = nodeCapacity;
-      }
-    }
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(numberOfDatanodes).storagesPerDatanode(storagesPerDn)
-        .storageTypes(storageTypes).storageCapacities(capacities).build();
-    cluster.waitActive();
-    return cluster;
-  }
-
-  public void restartNamenode() throws IOException {
-    hdfsCluster.restartNameNodes();
-    hdfsCluster.waitActive();
-  }
-
-  /**
-   * Implementation of the listener callback, which collects all the SPS
-   * move-attempted blocks for assertion.
-   */
-  public static final class ExternalBlockMovementListener
-      implements BlockMovementListener {
-
-    private List<Block> actualBlockMovements = new ArrayList<>();
-
-    @Override
-    public void notifyMovementTriedBlocks(Block[] moveAttemptFinishedBlks) {
-      for (Block block : moveAttemptFinishedBlks) {
-        actualBlockMovements.add(block);
-      }
-      LOG.info("Movement attempted blocks:{}", actualBlockMovements);
-    }
-
-    public List<Block> getActualBlockMovements() {
-      return actualBlockMovements;
-    }
-
-    public void clear() {
-      actualBlockMovements.clear();
-    }
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 8a25a5e..250e54b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -42,7 +42,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -70,6 +72,9 @@
   private int cellSize;
   private int defaultStripeBlockSize;
   private Configuration conf;
+  private StoragePolicySatisfier sps;
+  private ExternalSPSContext ctxt;
+  private NameNodeConnector nnc;
 
   private ErasureCodingPolicy getEcPolicy() {
     return StripedFileTestUtil.getDefaultECPolicy();
@@ -87,7 +92,7 @@
     defaultStripeBlockSize = cellSize * stripesPerBlock;
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
     // Reduced refresh cycle to update latest datanodes.
     conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
         1000);
@@ -102,8 +107,8 @@
    */
   @Test(timeout = 300000)
   public void testMoverWithFullStripe() throws Exception {
-    // start 10 datanodes
-    int numOfDatanodes = 10;
+    // start 11 datanodes
+    int numOfDatanodes = 11;
     int storagesPerDatanode = 2;
     long capacity = 20 * defaultStripeBlockSize;
     long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -122,6 +127,7 @@
             {StorageType.DISK, StorageType.DISK},
             {StorageType.DISK, StorageType.DISK},
             {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
@@ -133,7 +139,7 @@
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     try {
       cluster.waitActive();
-
+      startSPS();
       DistributedFileSystem dfs = cluster.getFileSystem();
       dfs.enableErasureCodingPolicy(
           StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -189,12 +195,12 @@
       LOG.info("Sets storage policy to COLD and invoked satisfyStoragePolicy");
       cluster.triggerHeartbeats();
 
-      waitForBlocksMovementAttemptReport(cluster, 9, 60000);
       // verify storage types and locations
       waitExpectedStorageType(cluster, fooFile, fileLen, StorageType.ARCHIVE, 9,
           9, 60000);
     } finally {
       cluster.shutdown();
+      sps.stopGracefully();
     }
   }
 
@@ -213,7 +219,7 @@
   public void testWhenOnlyFewTargetNodesAreAvailableToSatisfyStoragePolicy()
       throws Exception {
-    // start 10 datanodes
+    // start 11 datanodes
-    int numOfDatanodes = 10;
+    int numOfDatanodes = 11;
     int storagesPerDatanode = 2;
     long capacity = 20 * defaultStripeBlockSize;
     long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -234,6 +240,7 @@
             {StorageType.DISK, StorageType.DISK},
             {StorageType.DISK, StorageType.DISK},
             {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK},
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE}})
@@ -243,7 +250,7 @@
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     try {
       cluster.waitActive();
-
+      startSPS();
       DistributedFileSystem dfs = cluster.getFileSystem();
       dfs.enableErasureCodingPolicy(
           StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -271,6 +278,7 @@
           Assert.assertEquals(StorageType.DISK, type);
         }
       }
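+      // Brief pause (assumption: lets the reported block locations settle)
+      // before verifying the striped block groups.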
+      Thread.sleep(5000);
       StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
           dataBlocks + parityBlocks);
 
@@ -296,13 +304,13 @@
       LOG.info("Sets storage policy to COLD and invoked satisfyStoragePolicy");
       cluster.triggerHeartbeats();
 
-      waitForBlocksMovementAttemptReport(cluster, 5, 60000);
-      waitForAttemptedItems(cluster, 1, 30000);
+      waitForAttemptedItems(1, 30000);
       // verify storage types and locations.
       waitExpectedStorageType(cluster, fooFile, fileLen, StorageType.ARCHIVE, 5,
           9, 60000);
     } finally {
       cluster.shutdown();
+      sps.stopGracefully();
     }
   }
 
@@ -352,6 +360,7 @@
         .build();
     try {
       cluster.waitActive();
+      startSPS();
       DistributedFileSystem fs = cluster.getFileSystem();
       fs.enableErasureCodingPolicy(
           StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -393,6 +402,7 @@
           StorageType.ARCHIVE, 9, 9, 60000);
     } finally {
       cluster.shutdown();
+      sps.stopGracefully();
     }
   }
 
@@ -444,6 +454,7 @@
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     try {
       cluster.waitActive();
+      startSPS();
       DistributedFileSystem dfs = cluster.getFileSystem();
       dfs.enableErasureCodingPolicy(
           StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -481,35 +492,25 @@
       LOG.info("Sets storage policy to COLD and invoked satisfyStoragePolicy");
       cluster.triggerHeartbeats();
 
-      waitForAttemptedItems(cluster, 1, 30000);
+      waitForAttemptedItems(1, 30000);
       // verify storage types and locations.
       waitExpectedStorageType(cluster, fooFile, fileLen, StorageType.DISK, 9, 9,
           60000);
-      waitForAttemptedItems(cluster, 1, 30000);
+      waitForAttemptedItems(1, 30000);
     } finally {
       cluster.shutdown();
+      sps.stopGracefully();
     }
   }
 
-  private void waitForAttemptedItems(MiniDFSCluster cluster,
-      long expectedBlkMovAttemptedCount, int timeout)
-          throws TimeoutException, InterruptedException {
-    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
-    final StoragePolicySatisfier sps =
-        (StoragePolicySatisfier) blockManager
-        .getSPSManager().getInternalSPSService();
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
-            expectedBlkMovAttemptedCount,
-            ((BlockStorageMovementAttemptedItems) sps
-                .getAttemptedItemsMonitor()).getAttemptedItemsCount());
-        return ((BlockStorageMovementAttemptedItems) sps
-            .getAttemptedItemsMonitor())
-                .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
-      }
-    }, 100, timeout);
+  private void startSPS() throws IOException {
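+    // Start an external SPS: the NameNodeConnector takes the Mover ID path
+    // so that only one mover/SPS instance runs against this namespace, and
+    // the satisfier is then started in EXTERNAL mode.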
+    nnc = DFSTestUtil.getNameNodeConnector(conf,
+        HdfsServerConstants.MOVER_ID_PATH, 1, false);
+
+    sps = new StoragePolicySatisfier(conf);
+    ctxt = new ExternalSPSContext(sps, nnc);
+    sps.init(ctxt);
+    sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
   }
 
   private static void initConfWithStripe(Configuration conf,
@@ -562,24 +563,18 @@
     }, 100, timeout);
   }
 
-  // Check whether the block movement attempt report has been arrived at the
-  // Namenode(SPS).
-  private void waitForBlocksMovementAttemptReport(MiniDFSCluster cluster,
-      long expectedMoveFinishedBlks, int timeout)
-          throws TimeoutException, InterruptedException {
-    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
-    final StoragePolicySatisfier sps =
-        (StoragePolicySatisfier) blockManager.getSPSManager()
-        .getInternalSPSService();
+  private void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
+      int timeout) throws TimeoutException, InterruptedException {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        int actualCount = ((BlockStorageMovementAttemptedItems) (sps
-            .getAttemptedItemsMonitor())).getMovementFinishedBlocksCount();
-        LOG.info("MovementFinishedBlocks: expectedCount={} actualCount={}",
-            expectedMoveFinishedBlks,
-            actualCount);
-        return actualCount >= expectedMoveFinishedBlks;
+        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
+            expectedBlkMovAttemptedCount,
+            ((BlockStorageMovementAttemptedItems) (sps
+                .getAttemptedItemsMonitor())).getAttemptedItemsCount());
+        return ((BlockStorageMovementAttemptedItems) (sps
+            .getAttemptedItemsMonitor()))
+                .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
       }
     }, 100, timeout);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 18acb50..d9a93fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -32,34 +32,57 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_MAX_OUTSTANDING_PATHS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URI;
+import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockStorageMovementAttemptedItems;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
-import org.apache.hadoop.hdfs.server.namenode.sps.TestStoragePolicySatisfier;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.SecurityUtil;
@@ -67,29 +90,57 @@
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
 /**
  * Tests the external sps service plugins.
  */
-public class TestExternalStoragePolicySatisfier
-    extends TestStoragePolicySatisfier {
+public class TestExternalStoragePolicySatisfier {
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
   private StorageType[][] allDiskTypes =
       new StorageType[][]{{StorageType.DISK, StorageType.DISK},
           {StorageType.DISK, StorageType.DISK},
           {StorageType.DISK, StorageType.DISK}};
-  private NameNodeConnector nnc;
   private File keytabFile;
   private String principal;
   private MiniKdc kdc;
   private File baseDir;
+  private NameNodeConnector nnc;
   private StoragePolicySatisfier externalSps;
   private ExternalSPSContext externalCtxt;
+  private DistributedFileSystem dfs = null;
+  private MiniDFSCluster hdfsCluster = null;
+  private Configuration config = null;
+  private static final int NUM_OF_DATANODES = 3;
+  private static final int STORAGES_PER_DATANODE = 2;
+  private static final long CAPACITY = 2 * 256 * 1024 * 1024;
+  private static final String FILE = "/testMoveToSatisfyStoragePolicy";
+  private static final int DEFAULT_BLOCK_SIZE = 1024;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestExternalStoragePolicySatisfier.class);
+
+  @Before
+  public void setUp() {
+    config = new HdfsConfiguration();
+    config.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.EXTERNAL.toString());
+    // Most of the tests restart DNs and the NN, so use a reduced refresh
+    // cycle to pick up the latest datanodes.
+    config.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
+        3000);
+  }
 
   @After
   public void destroy() throws Exception {
@@ -97,26 +148,49 @@
       kdc.stop();
       FileUtil.fullyDelete(baseDir);
     }
+    if (hdfsCluster != null) {
+      hdfsCluster.shutdown();
+    }
   }
 
-  @Override
-  public void shutdownCluster() {
+  /**
+   * Sets hdfs cluster.
+   */
+  private void setCluster(MiniDFSCluster cluster) {
+    this.hdfsCluster = cluster;
+  }
+
+  /**
+   * @return conf.
+   */
+  private Configuration getConf() {
+    return this.config;
+  }
+
+  /**
+   * @return hdfs cluster.
+   */
+  private MiniDFSCluster getCluster() {
+    return hdfsCluster;
+  }
+
+  /**
+   * Gets distributed file system.
+   *
+   * @throws IOException
+   */
+  private DistributedFileSystem getFS() throws IOException {
+    this.dfs = hdfsCluster.getFileSystem();
+    return this.dfs;
+  }
+
+  private void shutdownCluster() {
     if (externalSps != null) {
       externalSps.stopGracefully();
     }
-    super.shutdownCluster();
   }
 
-  @Override
-  public void setUp() {
-    super.setUp();
-
-    getConf().set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.EXTERNAL.toString());
-  }
-
-  @Override
-  public void createCluster() throws IOException {
+  private void createCluster() throws IOException {
     getConf().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     setCluster(startCluster(getConf(), allDiskTypes, NUM_OF_DATANODES,
         STORAGES_PER_DATANODE, CAPACITY));
@@ -124,8 +198,7 @@
     writeContent(FILE);
   }
 
-  @Override
-  public MiniDFSCluster startCluster(final Configuration conf,
+  private MiniDFSCluster startCluster(final Configuration conf,
       StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn,
       long nodeCapacity) throws IOException {
     long[][] capacities = new long[numberOfDatanodes][storagesPerDn];
@@ -139,7 +212,8 @@
         .storageTypes(storageTypes).storageCapacities(capacities).build();
     cluster.waitActive();
 
-    nnc = getNameNodeConnector(getConf());
+    nnc = DFSTestUtil.getNameNodeConnector(getConf(),
+        HdfsServerConstants.MOVER_ID_PATH, 1, false);
 
     externalSps = new StoragePolicySatisfier(getConf());
     externalCtxt = new ExternalSPSContext(externalSps, nnc);
@@ -149,7 +223,7 @@
     return cluster;
   }
 
-  public void restartNamenode() throws IOException{
+  private void restartNamenode() throws IOException{
     if (externalSps != null) {
       externalSps.stopGracefully();
     }
@@ -163,60 +237,6 @@
     externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
   }
 
-  private NameNodeConnector getNameNodeConnector(Configuration conf)
-      throws IOException {
-    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-    Assert.assertEquals(1, namenodes.size());
-    final Path externalSPSPathId = HdfsServerConstants.MOVER_ID_PATH;
-    NameNodeConnector.checkOtherInstanceRunning(false);
-    while (true) {
-      try {
-        final List<NameNodeConnector> nncs = NameNodeConnector
-            .newNameNodeConnectors(namenodes,
-                StoragePolicySatisfier.class.getSimpleName(),
-                externalSPSPathId, conf,
-                NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
-        return nncs.get(0);
-      } catch (IOException e) {
-        LOG.warn("Failed to connect with namenode", e);
-        // Ignore
-      }
-
-    }
-  }
-
-  public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
-      int timeout) throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
-            expectedBlkMovAttemptedCount,
-            ((BlockStorageMovementAttemptedItems) (externalSps
-                .getAttemptedItemsMonitor())).getAttemptedItemsCount());
-        return ((BlockStorageMovementAttemptedItems) (externalSps
-            .getAttemptedItemsMonitor()))
-                .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
-      }
-    }, 100, timeout);
-  }
-
-  public void waitForBlocksMovementAttemptReport(
-      long expectedMovementFinishedBlocksCount, int timeout)
-          throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        int actualCount = externalSps.getAttemptedItemsMonitor()
-            .getAttemptedItemsCount();
-        LOG.info("MovementFinishedBlocks: expectedCount={} actualCount={}",
-            expectedMovementFinishedBlocksCount, actualCount);
-        return actualCount
-            >= expectedMovementFinishedBlocksCount;
-      }
-    }, 100, timeout);
-  }
-
   private void initSecureConf(Configuration conf) throws Exception {
     String username = "externalSPS";
     baseDir = GenericTestUtils
@@ -344,22 +364,6 @@
   }
 
   /**
-   * Test verifies status check when Satisfier is not running inside namenode.
-   */
-  @Test(timeout = 90000)
-  public void testStoragePolicySatisfyPathStatus() throws Exception {
-    createCluster();
-    DistributedFileSystem fs = getFS();
-    try {
-      fs.getClient().checkStoragePolicySatisfyPathStatus(FILE);
-      Assert.fail("Should throw exception as SPS is not running inside NN!");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("Satisfier is not running"
-          + " inside namenode, so status can't be returned.", e);
-    }
-  }
-
-  /**
    * Tests to verify that SPS should be able to start when the Mover ID file
    * is not being hold by a Mover. This can be the case when Mover exits
    * ungracefully without deleting the ID file from HDFS.
@@ -399,17 +403,9 @@
   }
 
   /**
-   * Status won't be supported for external SPS, now. So, ignoring it.
-   */
-  @Ignore("Status is not supported for external SPS. So, ignoring it.")
-  public void testMaxRetryForFailedBlock() throws Exception {
-  }
-
-  /**
    * This test is specific to internal SPS. So, ignoring it.
    */
   @Ignore("This test is specific to internal SPS. So, ignoring it.")
-  @Override
   public void testTraverseWhenParentDeleted() throws Exception {
   }
 
@@ -417,7 +413,1238 @@
    * This test is specific to internal SPS. So, ignoring it.
    */
   @Ignore("This test is specific to internal SPS. So, ignoring it.")
-  @Override
   public void testTraverseWhenRootParentDeleted() throws Exception {
   }
+
+
+  @Test(timeout = 300000)
+  public void testWhenStoragePolicySetToCOLD()
+      throws Exception {
+
+    try {
+      createCluster();
+      doTestWhenStoragePolicySetToCOLD();
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  private void doTestWhenStoragePolicySetToCOLD() throws Exception {
+    // Change policy to COLD
+    dfs.setStoragePolicy(new Path(FILE), COLD);
+
+    StorageType[][] newtypes =
+        new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.ARCHIVE}};
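+    // Add ARCHIVE based datanodes so the COLD policy can be satisfied.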
+    startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
+        STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+    hdfsCluster.triggerHeartbeats();
+    dfs.satisfyStoragePolicy(new Path(FILE));
+    // Wait till namenode notified about the block location details
+    DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 3, 35000,
+        dfs);
+  }
+
+  @Test(timeout = 300000)
+  public void testWhenStoragePolicySetToALLSSD()
+      throws Exception {
+    try {
+      createCluster();
+      // Change policy to ALL_SSD
+      dfs.setStoragePolicy(new Path(FILE), "ALL_SSD");
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK},
+              {StorageType.SSD, StorageType.DISK},
+              {StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD based nodes are added to the cluster by adding SSD
+      // based datanodes.
+      startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the StoragePolicySatisfier has identified the blocks to
+      // move to SSD areas.
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 3, 30000, dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  @Test(timeout = 300000)
+  public void testWhenStoragePolicySetToONESSD()
+      throws Exception {
+    try {
+      createCluster();
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD based nodes are added to the cluster by adding SSD
+      // based datanodes.
+      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the StoragePolicySatisfier has identified the blocks to
+      // move to SSD areas.
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
+          dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that the block storage movement report will be propagated
+   * to Namenode via datanode heartbeat.
+   */
+  @Test(timeout = 300000)
+  public void testBlksStorageMovementAttemptFinishedReport() throws Exception {
+    try {
+      createCluster();
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD based nodes are added to the cluster by adding SSD
+      // based datanodes.
+      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+
+      // Wait till the block is moved to SSD areas
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
+          dfs);
+
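+      // At least one block movement attempt should be reported back to the
+      // external SPS.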
+      waitForBlocksMovementAttemptReport(1, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that multiple files given to satisfy the storage policy
+   * together are all handled correctly.
+   */
+  @Test(timeout = 300000)
+  public void testMultipleFilesForSatisfyStoragePolicy() throws Exception {
+    try {
+      createCluster();
+      List<String> files = new ArrayList<>();
+      files.add(FILE);
+
+      // Creates 4 more files. Send all of them for satisfying the storage
+      // policy together.
+      for (int i = 0; i < 4; i++) {
+        String file1 = "/testMoveWhenStoragePolicyNotSatisfying_" + i;
+        files.add(file1);
+        writeContent(file1);
+      }
+      // Change policy to ONE_SSD
+      for (String fileName : files) {
+        dfs.setStoragePolicy(new Path(fileName), ONE_SSD);
+        dfs.satisfyStoragePolicy(new Path(fileName));
+      }
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+
+      // Make sure SSD based nodes are added to the cluster by adding SSD
+      // based datanodes.
+      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+      hdfsCluster.triggerHeartbeats();
+
+      for (String fileName : files) {
+        // Wait till the block is moved to SSD areas
+        DFSTestUtil.waitExpectedStorageType(
+            fileName, StorageType.SSD, 1, 30000, dfs);
+        DFSTestUtil.waitExpectedStorageType(
+            fileName, StorageType.DISK, 2, 30000, dfs);
+      }
+
+      waitForBlocksMovementAttemptReport(files.size(), 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that hdfsAdmin.satisfyStoragePolicy works well for a file.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSatisfyFileWithHdfsAdmin() throws Exception {
+    try {
+      createCluster();
+      HdfsAdmin hdfsAdmin =
+          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(FILE), COLD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+              {StorageType.DISK, StorageType.ARCHIVE},
+              {StorageType.DISK, StorageType.ARCHIVE}};
+      startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+      hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
+
+      hdfsCluster.triggerHeartbeats();
+      // Wait till namenode notified about the block location details
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 3, 30000,
+          dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that hdfsAdmin.satisfyStoragePolicy works well for a
+   * directory.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSatisfyDirWithHdfsAdmin() throws Exception {
+    try {
+      createCluster();
+      HdfsAdmin hdfsAdmin =
+          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+      final String subDir = "/subDir";
+      final String subFile1 = subDir + "/subFile1";
+      final String subDir2 = subDir + "/subDir2";
+      final String subFile2 = subDir2 + "/subFile2";
+      dfs.mkdirs(new Path(subDir));
+      writeContent(subFile1);
+      dfs.mkdirs(new Path(subDir2));
+      writeContent(subFile2);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(subDir), ONE_SSD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.SSD, StorageType.DISK}};
+      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+      hdfsAdmin.satisfyStoragePolicy(new Path(subDir));
+
+      hdfsCluster.triggerHeartbeats();
+
+      // take effect for the file in the directory.
+      DFSTestUtil.waitExpectedStorageType(
+          subFile1, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          subFile1, StorageType.DISK, 2, 30000, dfs);
+
+      // The satisfy request applies recursively, so the file under the
+      // sub-directory is also expected to be moved.
+      DFSTestUtil.waitExpectedStorageType(
+          subFile2, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          subFile2, StorageType.DISK, 2, 30000, dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify hdfsAdmin.satisfyStoragePolicy exceptions.
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSatisfyWithExceptions() throws Exception {
+    try {
+      createCluster();
+      final String nonExistingFile = "/noneExistingFile";
+      hdfsCluster.getConfiguration(0).
+          setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
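+      // Restart the NN so that the disabled storage policy setting takes
+      // effect.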
+      restartNamenode();
+      HdfsAdmin hdfsAdmin =
+          new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+
+      try {
+        hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
+        Assert.fail(String.format(
+            "Should failed to satisfy storage policy "
+                + "for %s since %s is set to false.",
+            FILE, DFS_STORAGE_POLICY_ENABLED_KEY));
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains(String.format(
+            "Failed to satisfy storage policy since %s is set to false.",
+            DFS_STORAGE_POLICY_ENABLED_KEY), e);
+      }
+
+      hdfsCluster.getConfiguration(0).
+          setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
+      restartNamenode();
+
+      hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(config), config);
+      try {
+        hdfsAdmin.satisfyStoragePolicy(new Path(nonExistingFile));
+        Assert.fail("Should throw FileNotFoundException for " +
+            nonExistingFile);
+      } catch (FileNotFoundException e) {
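+        // Expected: the given path does not exist.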
+
+      }
+
+      try {
+        hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
+        hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
+      } catch (Exception e) {
+        Assert.fail(String.format("Allow to invoke mutlipe times "
+            + "#satisfyStoragePolicy() api for a path %s , internally just "
+            + "skipping addtion to satisfy movement queue.", FILE));
+      }
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that for the given path, some of the blocks or block src
+   * locations (src nodes) under the given path will be scheduled for block
+   * movement.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
+   * Only one datanode is available with storage type ARCHIVE, say D.
+   *
+   * SPS will schedule block movement to the coordinator node with the details,
+   * blk_1[move A(DISK) -> D(ARCHIVE)], blk_2[move A(DISK) -> D(ARCHIVE)].
+   */
+  @Test(timeout = 300000)
+  public void testWhenOnlyFewTargetDatanodeAreAvailableToSatisfyStoragePolicy()
+      throws Exception {
+    try {
+      createCluster();
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(FILE), COLD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE}};
+
+      // Adding ARCHIVE based datanodes.
+      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the StoragePolicySatisfier has identified the block to move
+      // to the ARCHIVE area.
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 1, 30000,
+          dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
+          dfs);
+
+      waitForBlocksMovementAttemptReport(1, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that for the given path, no blocks or block src
+   * locations (src nodes) under the given path will be scheduled for block
+   * movement, as no datanode with the required storage type is available.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => blk_1[locations=A(DISK),B(DISK),C(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK)]. Now, set storage policy to COLD.
+   * No datanode is available with storage type ARCHIVE.
+   *
+   * SPS won't schedule any block movement for this path.
+   */
+  @Test(timeout = 300000)
+  public void testWhenNoTargetDatanodeToSatisfyStoragePolicy()
+      throws Exception {
+    try {
+      createCluster();
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(FILE), COLD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.DISK, StorageType.DISK}};
+      // Adding DISK based datanodes
+      startAdditionalDNs(config, 1, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+
+      // No block movement will be scheduled as there is no target node
+      // available with the required storage type.
+      waitForAttemptedItems(1, 30000);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
+          dfs);
+      // Since there is no target node the item will get timed out and then
+      // re-attempted.
+      waitForAttemptedItems(1, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test to verify that the satisfier worker can't move pinned blocks. If the
+   * given block is pinned, it shouldn't be considered for retries.
+   */
+  @Test(timeout = 120000)
+  public void testMoveWithBlockPinning() throws Exception {
+    try{
+      config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
+      hdfsCluster = startCluster(config, allDiskTypes, 3, 2, CAPACITY);
+
+      hdfsCluster.waitActive();
+      dfs = hdfsCluster.getFileSystem();
+
+      // create a file with replication factor 3 and mark 2 pinned block
+      // locations.
+      final String file1 = createFileAndSimulateFavoredNodes(2);
+
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(file1), COLD);
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE},
+              {StorageType.ARCHIVE, StorageType.ARCHIVE}};
+      // Adding ARCHIVE based datanodes
+      startAdditionalDNs(config, 3, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+      dfs.satisfyStoragePolicy(new Path(file1));
+      hdfsCluster.triggerHeartbeats();
+
+      // Only the unpinned replica can be scheduled for movement; the two
+      // pinned replicas should remain on DISK.
+      waitForAttemptedItems(1, 30000);
+      waitForBlocksMovementAttemptReport(1, 30000);
+      DFSTestUtil.waitExpectedStorageType(
+          file1, StorageType.ARCHIVE, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(
+          file1, StorageType.DISK, 2, 30000, dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests to verify that for the given path, only a few of the blocks or block
+   * src locations (src nodes) under the given path will be scheduled for block
+   * movement.
+   *
+   * For example, there are two blocks for a file:
+   *
+   * File1 => two blocks and default storage policy(HOT).
+   * blk_1[locations=A(DISK),B(DISK),C(DISK),D(DISK),E(DISK)],
+   * blk_2[locations=A(DISK),B(DISK),C(DISK),D(DISK),E(DISK)].
+   *
+   * Now, set storage policy to COLD.
+   * Only two DNs are available with the expected storage type ARCHIVE, say A
+   * and E.
+   *
+   * SPS will schedule block movement to the coordinator node with the details,
+   * blk_1[move A(DISK) -> A(ARCHIVE), move E(DISK) -> E(ARCHIVE)],
+   * blk_2[move A(DISK) -> A(ARCHIVE), move E(DISK) -> E(ARCHIVE)].
+   */
+  @Test(timeout = 300000)
+  public void testWhenOnlyFewSourceNodesHaveMatchingTargetNodes()
+      throws Exception {
+    try {
+      int numOfDns = 5;
+      config.setLong("dfs.block.size", 1024);
+      allDiskTypes =
+          new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+              {StorageType.DISK, StorageType.DISK},
+              {StorageType.DISK, StorageType.DISK},
+              {StorageType.DISK, StorageType.DISK},
+              {StorageType.DISK, StorageType.ARCHIVE}};
+      hdfsCluster = startCluster(config, allDiskTypes, numOfDns,
+          STORAGES_PER_DATANODE, CAPACITY);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(FILE, (short) 5);
+
+      // Change policy to COLD
+      dfs.setStoragePolicy(new Path(FILE), COLD);
+
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+      // Wait till the StoragePolicySatisfier has identified the block to move
+      // to the ARCHIVE area.
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 2, 30000,
+          dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
+          dfs);
+
+      waitForBlocksMovementAttemptReport(1, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests moving block storage within the same datanode. Let's say we
+   * have DN1[DISK,ARCHIVE], DN2[DISK,SSD], DN3[DISK,RAM_DISK]; when the
+   * storage policy is set to ONE_SSD and satisfyStoragePolicy is requested,
+   * the block should move to DN2[SSD] successfully.
+   */
+  @Test(timeout = 300000)
+  public void testBlockMoveInSameDatanodeWithONESSD() throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.RAM_DISK}};
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, NUM_OF_DATANODES,
+          STORAGES_PER_DATANODE, CAPACITY);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(FILE);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
+
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 1, 30000, dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
+          dfs);
+
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests moving block storage within the same datanode and to a remote node.
+   * Let's say we have DN1[DISK,ARCHIVE], DN2[ARCHIVE,SSD], DN3[DISK,DISK],
+   * DN4[DISK,DISK]; when the storage policy is set to WARM and
+   * satisfyStoragePolicy is requested, the blocks should move to DN1[ARCHIVE]
+   * and DN2[ARCHIVE] successfully.
+   */
+  @Test(timeout = 300000)
+  public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.SSD},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK}};
+
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          STORAGES_PER_DATANODE, CAPACITY);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(FILE);
+
+      // Change policy to WARM
+      dfs.setStoragePolicy(new Path(FILE), "WARM");
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      hdfsCluster.triggerHeartbeats();
+
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 1, 30000,
+          dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 2, 30000,
+          dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * If a replica with the expected storage type already exists on the source
+   * DN, then that DN should be skipped.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
+      throws Exception {
+    StorageType[][] diskTypes = new StorageType[][] {
+        {StorageType.DISK, StorageType.ARCHIVE},
+        {StorageType.DISK, StorageType.ARCHIVE},
+        {StorageType.DISK, StorageType.ARCHIVE}};
+
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          STORAGES_PER_DATANODE, CAPACITY);
+      dfs = hdfsCluster.getFileSystem();
+      // 1. Write two replicas on DISK.
+      DFSTestUtil.createFile(dfs, new Path(FILE), DEFAULT_BLOCK_SIZE,
+          (short) 2, 0);
+      // 2. Change policy to COLD, so third replica will be written to ARCHIVE.
+      dfs.setStoragePolicy(new Path(FILE), "COLD");
+
+      // 3. Change the replication factor to 3.
+      dfs.setReplication(new Path(FILE), (short) 3);
+
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 2, 30000,
+          dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.ARCHIVE, 1, 30000,
+          dfs);
+
+      // 4. Change policy to HOT, so we can move all blocks to DISK.
+      dfs.setStoragePolicy(new Path(FILE), "HOT");
+
+      // 5. Satisfy the policy.
+      dfs.satisfyStoragePolicy(new Path(FILE));
+
+      // 6. Blocks should move successfully.
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
+          dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests that movements should not be assigned when there is no space in the
+   * target DN.
+   */
+  @Test(timeout = 300000)
+  public void testChooseInSameDatanodeWithONESSDShouldNotChooseIfNoSpace()
+      throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.DISK}};
+    config.setLong("dfs.block.size", 2 * DEFAULT_BLOCK_SIZE);
+    long dnCapacity = 1024 * DEFAULT_BLOCK_SIZE + (2 * DEFAULT_BLOCK_SIZE - 1);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, NUM_OF_DATANODES,
+          STORAGES_PER_DATANODE, dnCapacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(FILE);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(FILE), ONE_SSD);
+      Path filePath = new Path("/testChooseInSameDatanode");
+      final FSDataOutputStream out =
+          dfs.create(filePath, false, 100, (short) 1, 2 * DEFAULT_BLOCK_SIZE);
+      try {
+        dfs.setStoragePolicy(filePath, ONE_SSD);
+        // Try to fill up SSD part by writing content
+        long remaining = dfs.getStatus().getRemaining() / (3 * 2);
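+        // Assumption: remaining/(3 DNs * 2 storages) approximates one storage
+        // volume's free space, so this write roughly fills the SSD volume.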
+        for (int i = 0; i < remaining; i++) {
+          out.write(i);
+        }
+      } finally {
+        out.close();
+      }
+      hdfsCluster.triggerHeartbeats();
+      ArrayList<DataNode> dataNodes = hdfsCluster.getDataNodes();
+      // Temporarily disable heartbeats so that we can assert whether any
+      // items get scheduled to DNs even though the DNs do not have space to
+      // write. Disabling heartbeats keeps the scheduled items on the
+      // DatanodeDescriptor itself.
+      for (DataNode dataNode : dataNodes) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, true);
+      }
+      dfs.satisfyStoragePolicy(new Path(FILE));
+
+      // Wait for items to be processed
+      waitForAttemptedItems(1, 30000);
+
+      // Enable heart beats now
+      for (DataNode dataNode : dataNodes) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dataNode, false);
+      }
+      hdfsCluster.triggerHeartbeats();
+
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.DISK, 3, 30000,
+          dfs);
+      DFSTestUtil.waitExpectedStorageType(FILE, StorageType.SSD, 0, 30000, dfs);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests that the xattr is cleaned up if satisfyStoragePolicy is called on an
+   * EC file with an unsuitable storage policy set.
+   *
+   * @throws Exception
+   */
+  @Test(timeout = 300000)
+  public void testSPSShouldNotLeakXattrIfSatisfyStoragePolicyCallOnECFiles()
+      throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.SSD, StorageType.DISK},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.SSD}};
+
+    int defaultStripedBlockSize =
+        StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
+    config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
+    config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    config.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1L);
+    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        false);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          STORAGES_PER_DATANODE, CAPACITY);
+      dfs = hdfsCluster.getFileSystem();
+      dfs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+
+      // set "/foo" directory with ONE_SSD storage policy.
+      ClientProtocol client = NameNodeProxies.createProxy(config,
+          hdfsCluster.getFileSystem(0).getUri(), ClientProtocol.class)
+          .getProxy();
+      String fooDir = "/foo";
+      client.mkdirs(fooDir, new FsPermission((short) 777), true);
+      // set an EC policy on "/foo" directory
+      client.setErasureCodingPolicy(fooDir,
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+
+      // write file to fooDir
+      final String testFile = "/foo/bar";
+      long fileLen = 20 * defaultStripedBlockSize;
+      DFSTestUtil.createFile(dfs, new Path(testFile), fileLen, (short) 3, 0);
+
+      // ONE_SSD is an unsuitable storage policy for EC files.
+      client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+      dfs.satisfyStoragePolicy(new Path(testFile));
+
+      // Thread.sleep(9000); // To make sure SPS triggered
+      // verify storage types and locations
+      LocatedBlocks locatedBlocks =
+          client.getBlockLocations(testFile, 0, fileLen);
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (StorageType type : lb.getStorageTypes()) {
+          Assert.assertEquals(StorageType.DISK, type);
+        }
+      }
+
+      // Make sure satisfy xattr has been removed.
+      DFSTestUtil.waitForXattrRemoved(testFile, XATTR_SATISFY_STORAGE_POLICY,
+          hdfsCluster.getNamesystem(), 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS with an empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for the empty file.
+   * 3. SPS should skip this file and the xattr should not be added for it.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+    try {
+      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
+          STORAGES_PER_DATANODE, CAPACITY);
+      hdfsCluster.waitActive();
+      DistributedFileSystem fs = hdfsCluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
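+      // Record the current edit log transaction id; satisfying an empty file
+      // should be a no-op, so no new edits (xattr) should be written.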
+      FSEditLog editlog = hdfsCluster.getNameNode().getNamesystem()
+          .getEditLog();
+      long lastWrittenTxId = editlog.getLastWrittenTxId();
+      fs.satisfyStoragePolicy(filePath);
+      Assert.assertEquals("Xattr should not be added for the file",
+          lastWrittenTxId, editlog.getLastWrittenTxId());
+      INode inode = hdfsCluster.getNameNode().getNamesystem().getFSDirectory()
+          .getINode(filePath.toString());
+      Assert.assertTrue("XAttrFeature should be null for file",
+          inode.getXAttrFeature() == null);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS for low-redundancy file blocks.
+   * 1. Create a cluster with 3 datanodes.
+   * 2. Create one file with 3 replicas.
+   * 3. Set the policy and call satisfyStoragePolicy for the file.
+   * 4. Stop the NameNode and the datanodes.
+   * 5. Start the NameNode with 2 datanodes and wait for block movement.
+   * 6. Start the third datanode.
+   * 7. The third datanode's replica should also be moved to the proper
+   * storage based on the policy.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
+    try {
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          "3000");
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
+          "5000");
+      StorageType[][] newtypes = new StorageType[][] {
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK}};
+      hdfsCluster = startCluster(config, newtypes, 3, 2, CAPACITY);
+      hdfsCluster.waitActive();
+      DistributedFileSystem fs = hdfsCluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0);
+      fs.setStoragePolicy(filePath, "COLD");
+      List<DataNodeProperties> list = new ArrayList<>();
+      list.add(hdfsCluster.stopDataNode(0));
+      list.add(hdfsCluster.stopDataNode(0));
+      list.add(hdfsCluster.stopDataNode(0));
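+      // Restart the NN with only two of the three DNs so the file is in a
+      // low-redundancy state while the SPS processes it.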
+      restartNamenode();
+      hdfsCluster.restartDataNode(list.get(0), false);
+      hdfsCluster.restartDataNode(list.get(1), false);
+      hdfsCluster.waitActive();
+      fs.satisfyStoragePolicy(filePath);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 2, 30000, hdfsCluster.getFileSystem());
+      hdfsCluster.restartDataNode(list.get(2), false);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 3, 30000, hdfsCluster.getFileSystem());
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS for excess-redundancy file blocks.
+   * 1. Create a cluster with 5 datanodes.
+   * 2. Create one file with 5 replicas.
+   * 3. Set the file replication to 3.
+   * 4. Set the policy and call satisfyStoragePolicy for the file.
+   * 5. The blocks should be moved successfully.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception {
+    try {
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          "3000");
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
+          "5000");
+      StorageType[][] newtypes = new StorageType[][] {
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK}};
+      hdfsCluster = startCluster(config, newtypes, 5, 2, CAPACITY);
+      hdfsCluster.waitActive();
+      DistributedFileSystem fs = hdfsCluster.getFileSystem();
+      Path filePath = new Path("/zeroSizeFile");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
+      fs.setReplication(filePath, (short) 3);
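+      // Capture the attempted-items log so we can assert that no
+      // low-redundancy message is printed for the over-replicated file.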
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+          LogFactory.getLog(BlockStorageMovementAttemptedItems.class));
+      fs.setStoragePolicy(filePath, "COLD");
+      fs.satisfyStoragePolicy(filePath);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 3, 60000, hdfsCluster.getFileSystem());
+      assertFalse("Log output does not contain expected log message: ",
+          logs.getOutput().contains("some of the blocks are low redundant"));
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS for an empty directory; the xattr should be removed.
+   */
+  @Test(timeout = 300000)
+  public void testSPSForEmptyDirectory() throws IOException, TimeoutException,
+      InterruptedException {
+    try {
+      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
+          STORAGES_PER_DATANODE, CAPACITY);
+      hdfsCluster.waitActive();
+      DistributedFileSystem fs = hdfsCluster.getFileSystem();
+      Path emptyDir = new Path("/emptyDir");
+      fs.mkdirs(emptyDir);
+      fs.satisfyStoragePolicy(emptyDir);
+      // Make sure satisfy xattr has been removed.
+      DFSTestUtil.waitForXattrRemoved("/emptyDir",
+          XATTR_SATISFY_STORAGE_POLICY, hdfsCluster.getNamesystem(), 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS for a non-existent directory.
+   */
+  @Test(timeout = 300000)
+  public void testSPSForNonExistDirectory() throws Exception {
+    try {
+      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
+          STORAGES_PER_DATANODE, CAPACITY);
+      hdfsCluster.waitActive();
+      DistributedFileSystem fs = hdfsCluster.getFileSystem();
+      Path emptyDir = new Path("/emptyDir");
+      try {
+        fs.satisfyStoragePolicy(emptyDir);
+        fail("FileNotFoundException should throw");
+      } catch (FileNotFoundException e) {
+        // nothing to do
+      }
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS for a directory tree that doesn't have any files.
+   */
+  @Test(timeout = 300000)
+  public void testSPSWithDirectoryTreeWithoutFile() throws Exception {
+    try {
+      hdfsCluster = startCluster(config, allDiskTypes, NUM_OF_DATANODES,
+          STORAGES_PER_DATANODE, CAPACITY);
+      hdfsCluster.waitActive();
+      // Create directories
+      /*
+       *                   root
+       *                    |
+       *           A--------C--------D
+       *                    |
+       *               G----H----I
+       *                    |
+       *                    O
+       */
+      DistributedFileSystem fs = hdfsCluster.getFileSystem();
+      fs.mkdirs(new Path("/root/C/H/O"));
+      fs.mkdirs(new Path("/root/A"));
+      fs.mkdirs(new Path("/root/D"));
+      fs.mkdirs(new Path("/root/C/G"));
+      fs.mkdirs(new Path("/root/C/I"));
+      fs.satisfyStoragePolicy(new Path("/root"));
+      // Make sure satisfy xattr has been removed.
+      DFSTestUtil.waitForXattrRemoved("/root",
+          XATTR_SATISFY_STORAGE_POLICY, hdfsCluster.getNamesystem(), 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test SPS for a directory that has multilevel sub-directories.
+   */
+  @Test(timeout = 300000)
+  public void testMultipleLevelDirectoryForSatisfyStoragePolicy()
+      throws Exception {
+    try {
+      StorageType[][] diskTypes = new StorageType[][] {
+          {StorageType.DISK, StorageType.ARCHIVE},
+          {StorageType.ARCHIVE, StorageType.SSD},
+          {StorageType.DISK, StorageType.DISK}};
+      config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          STORAGES_PER_DATANODE, CAPACITY);
+      dfs = hdfsCluster.getFileSystem();
+      createDirectoryTree(dfs);
+
+      List<String> files = getDFSListOfTree();
+      dfs.setStoragePolicy(new Path("/root"), COLD);
+      dfs.satisfyStoragePolicy(new Path("/root"));
+      for (String fileName : files) {
+        // Wait till the block is moved to ARCHIVE
+        DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
+            30000, dfs);
+      }
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Test storage block movement while under-replication block tasks exist in
+   * the system, so that both share the DataNodes' max transfer streams.
+   *
+   * 1. Create a cluster with 2 datanodes.
+   * 2. Create 20 files with 2 replicas each.
+   * 3. Start 2 more DNs with DISK & SSD storage types.
+   * 4. Set the replication factor of the first 10 files to 4 to trigger
+   *    under-replication tasks.
+   * 5. Set the ALL_SSD policy on the second set of files (11-20).
+   * 6. Call SPS for files 11-20 to trigger block move tasks to the new DNs.
+   * 7. Wait for both the under-replication and SPS tasks to complete.
+   */
+  @Test(timeout = 300000)
+  public void testMoveBlocksWithUnderReplicatedBlocks() throws Exception {
+    try {
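+      // Cap the number of replication streams the NameNode schedules per
+      // DataNode so that the replication work and the SPS block moves have to
+      // share the same limited transfer slots.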
+      config.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 3);
+      config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
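+      // Use short SPS recheck and self-retry intervals so that pending block
+      // moves are re-evaluated quickly within the test timeout.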
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
+          "3000");
+      config.set(DFSConfigKeys
+          .DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY,
+          "5000");
+
+      StorageType[][] storagetypes = new StorageType[][] {
+          {StorageType.ARCHIVE, StorageType.DISK},
+          {StorageType.ARCHIVE, StorageType.DISK}};
+
+      hdfsCluster = startCluster(config, storagetypes, 2, 2, CAPACITY);
+      hdfsCluster.waitActive();
+      dfs = hdfsCluster.getFileSystem();
+
+      // Create 20 files of 5 blocks each; the first 10 will later be used for
+      // pending replication tasks and the rest for SPS block moves.
+      for (int i = 1; i <= 20; i++) {
+        Path filePath = new Path("/file" + i);
+        DFSTestUtil.createFile(dfs, filePath, DEFAULT_BLOCK_SIZE * 5, (short) 2,
+            0);
+      }
+
+      StorageType[][] newtypes =
+          new StorageType[][]{{StorageType.DISK, StorageType.SSD},
+              {StorageType.DISK, StorageType.SSD}};
+      startAdditionalDNs(config, 2, NUM_OF_DATANODES, newtypes,
+          STORAGES_PER_DATANODE, CAPACITY, hdfsCluster);
+
+      // Increase the replication factor to 4 for the first 10 files and thus
+      // initiate under-replication tasks.
+      for (int i = 1; i <= 10; i++) {
+        Path filePath = new Path("/file" + i);
+        dfs.setReplication(filePath, (short) 4);
+      }
+
+      // invoke SPS for 11-20 files
+      for (int i = 11; i <= 20; i++) {
+        Path filePath = new Path("/file" + i);
+        dfs.setStoragePolicy(filePath, "ALL_SSD");
+        dfs.satisfyStoragePolicy(filePath);
+      }
+
+      for (int i = 1; i <= 10; i++) {
+        Path filePath = new Path("/file" + i);
+        DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+            StorageType.DISK, 4, 60000, hdfsCluster.getFileSystem());
+      }
+      for (int i = 11; i <= 20; i++) {
+        Path filePath = new Path("/file" + i);
+        DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+            StorageType.SSD, 2, 30000, hdfsCluster.getFileSystem());
+      }
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  private static void createDirectoryTree(DistributedFileSystem dfs)
+      throws Exception {
+    // tree structure
+    /*
+     *                           root
+     *                             |
+     *           A--------B--------C--------D--------E
+     *                    |                 |
+     *          F----G----H----I       J----K----L----M
+     *               |                           |
+     *          N----O----P                 Q----R----S
+     *                    |                 |
+     *                    T                 U
+     */
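+    // Directories: root, B, D, G, L, P and Q; every other node is a 1KB file.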
+    // create root Node and child
+    dfs.mkdirs(new Path("/root"));
+    DFSTestUtil.createFile(dfs, new Path("/root/A"), 1024, (short) 3, 0);
+    dfs.mkdirs(new Path("/root/B"));
+    DFSTestUtil.createFile(dfs, new Path("/root/C"), 1024, (short) 3, 0);
+    dfs.mkdirs(new Path("/root/D"));
+    DFSTestUtil.createFile(dfs, new Path("/root/E"), 1024, (short) 3, 0);
+
+    // Create /root/B child
+    DFSTestUtil.createFile(dfs, new Path("/root/B/F"), 1024, (short) 3, 0);
+    dfs.mkdirs(new Path("/root/B/G"));
+    DFSTestUtil.createFile(dfs, new Path("/root/B/H"), 1024, (short) 3, 0);
+    DFSTestUtil.createFile(dfs, new Path("/root/B/I"), 1024, (short) 3, 0);
+
+    // Create /root/D child
+    DFSTestUtil.createFile(dfs, new Path("/root/D/J"), 1024, (short) 3, 0);
+    DFSTestUtil.createFile(dfs, new Path("/root/D/K"), 1024, (short) 3, 0);
+    dfs.mkdirs(new Path("/root/D/L"));
+    DFSTestUtil.createFile(dfs, new Path("/root/D/M"), 1024, (short) 3, 0);
+
+    // Create /root/B/G child
+    DFSTestUtil.createFile(dfs, new Path("/root/B/G/N"), 1024, (short) 3, 0);
+    DFSTestUtil.createFile(dfs, new Path("/root/B/G/O"), 1024, (short) 3, 0);
+    dfs.mkdirs(new Path("/root/B/G/P"));
+
+    // Create /root/D/L child
+    dfs.mkdirs(new Path("/root/D/L/Q"));
+    DFSTestUtil.createFile(dfs, new Path("/root/D/L/R"), 1024, (short) 3, 0);
+    DFSTestUtil.createFile(dfs, new Path("/root/D/L/S"), 1024, (short) 3, 0);
+
+    // Create /root/B/G/P child
+    DFSTestUtil.createFile(dfs, new Path("/root/B/G/P/T"), 1024, (short) 3, 0);
+
+    // Create /root/D/L/Q child
+    DFSTestUtil.createFile(dfs, new Path("/root/D/L/Q/U"), 1024, (short) 3, 0);
+  }
+
+  private List<String> getDFSListOfTree() {
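+    // Only the files created by createDirectoryTree() are listed here;
+    // directories are omitted because only files have blocks to verify.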
+    List<String> dfsList = new ArrayList<>();
+    dfsList.add("/root/A");
+    dfsList.add("/root/B/F");
+    dfsList.add("/root/B/G/N");
+    dfsList.add("/root/B/G/O");
+    dfsList.add("/root/B/G/P/T");
+    dfsList.add("/root/B/H");
+    dfsList.add("/root/B/I");
+    dfsList.add("/root/C");
+    dfsList.add("/root/D/J");
+    dfsList.add("/root/D/K");
+    dfsList.add("/root/D/L/Q/U");
+    dfsList.add("/root/D/L/R");
+    dfsList.add("/root/D/L/S");
+    dfsList.add("/root/D/M");
+    dfsList.add("/root/E");
+    return dfsList;
+  }
+
+  private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
+      throws IOException {
+    ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
+    final String file1 = "/testMoveWithBlockPinning";
+    // replication factor 3
+    InetSocketAddress[] favoredNodes = new InetSocketAddress[favoredNodesCount];
+    for (int i = 0; i < favoredNodesCount; i++) {
+      favoredNodes[i] = dns.get(i).getXferAddress();
+    }
+    DFSTestUtil.createFile(dfs, new Path(file1), false, 1024, 100,
+        DEFAULT_BLOCK_SIZE, (short) 3, 0, false, favoredNodes);
+
+    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
+    Assert.assertEquals("Wrong block count", 1,
+        locatedBlocks.locatedBlockCount());
+
+    // verify storage type before movement
+    LocatedBlock lb = locatedBlocks.get(0);
+    StorageType[] storageTypes = lb.getStorageTypes();
+    for (StorageType storageType : storageTypes) {
+      Assert.assertEquals(StorageType.DISK, storageType);
+    }
+
+    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
+    DatanodeInfo[] locations = lb.getLocations();
+    Assert.assertEquals(3, locations.length);
+    Assert.assertTrue(favoredNodesCount < locations.length);
+    for (DatanodeInfo dnInfo : locations) {
+      LOG.info("Simulate block pinning in datanode {}", dnInfo);
+      DataNode dn = hdfsCluster.getDataNode(dnInfo.getIpcPort());
+      InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
+      favoredNodesCount--;
+      if (favoredNodesCount <= 0) {
+        break; // marked favoredNodesCount number of pinned block locations
+      }
+    }
+    return file1;
+  }
+
+  public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
+      int timeout) throws TimeoutException, InterruptedException {
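+    // Poll every 100 ms until the attempted-items count matches the expected
+    // value or the given timeout elapses.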
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LOG.info("expectedAttemptedItemsCount={} actualAttemptedItemsCount={}",
+            expectedBlkMovAttemptedCount,
+            ((BlockStorageMovementAttemptedItems) (externalSps
+                .getAttemptedItemsMonitor())).getAttemptedItemsCount());
+        return ((BlockStorageMovementAttemptedItems) (externalSps
+            .getAttemptedItemsMonitor()))
+                .getAttemptedItemsCount() == expectedBlkMovAttemptedCount;
+      }
+    }, 100, timeout);
+  }
+
+  public void waitForBlocksMovementAttemptReport(
+      long expectedMovementFinishedBlocksCount, int timeout)
+          throws TimeoutException, InterruptedException {
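+    // Poll every 100 ms until at least the expected number of attempted block
+    // movements has been reported or the given timeout elapses.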
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        int actualCount = externalSps.getAttemptedItemsMonitor()
+            .getAttemptedItemsCount();
+        LOG.info("MovementFinishedBlocks: expectedCount={} actualCount={}",
+            expectedMovementFinishedBlocksCount, actualCount);
+        return actualCount
+            >= expectedMovementFinishedBlocksCount;
+      }
+    }, 100, timeout);
+  }
+
+  public void writeContent(final String fileName) throws IOException {
+    writeContent(fileName, (short) 3);
+  }
+
+  private void writeContent(final String fileName, short replicationFactor)
+      throws IOException {
+    // write to DISK
+    final FSDataOutputStream out = dfs.create(new Path(fileName),
+        replicationFactor);
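+    // Write 1 KB of data; OutputStream#write(int) stores only the low-order
+    // byte of each value.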
+    for (int i = 0; i < 1024; i++) {
+      out.write(i);
+    }
+    out.close();
+  }
+
+  private void startAdditionalDNs(final Configuration conf,
+      int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
+      int storagesPerDn, long nodeCapacity, final MiniDFSCluster cluster)
+          throws IOException {
+    long[][] capacities;
+    existingNodesNum += newNodesRequired;
+    capacities = new long[newNodesRequired][storagesPerDn];
+    for (int i = 0; i < newNodesRequired; i++) {
+      for (int j = 0; j < storagesPerDn; j++) {
+        capacities[i][j] = nodeCapacity;
+      }
+    }
+
+    cluster.startDataNodes(conf, newNodesRequired, newTypes, true, null, null,
+        null, capacities, null, false, false, false, null);
+    cluster.triggerHeartbeats();
+  }
+
+  /**
+   * Listener callback implementation that collects all the SPS move-attempted
+   * blocks for assertions.
+   */
+  public static final class ExternalBlockMovementListener
+      implements BlockMovementListener {
+
+    private List<Block> actualBlockMovements = new ArrayList<>();
+
+    @Override
+    public void notifyMovementTriedBlocks(Block[] moveAttemptFinishedBlks) {
+      for (Block block : moveAttemptFinishedBlks) {
+        actualBlockMovements.add(block);
+      }
+      LOG.info("Movement attempted blocks:{}", actualBlockMovements);
+    }
+
+    public List<Block> getActualBlockMovements() {
+      return actualBlockMovements;
+    }
+
+    public void clear() {
+      actualBlockMovements.clear();
+    }
+  }
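+
+  // Illustrative usage sketch (not part of this patch): after wiring the
+  // listener into the external SPS block move handling, a test could assert
+  // on the collected blocks, e.g. with a hypothetical expected count:
+  //   ExternalBlockMovementListener listener =
+  //       new ExternalBlockMovementListener();
+  //   // ... trigger SPS block moves with the listener registered ...
+  //   Assert.assertEquals(expectedMovedBlockCount,
+  //       listener.getActualBlockMovements().size());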
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index 28838a6..ad77684 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -50,7 +50,7 @@
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
     StorageType[][] newtypes = new StorageType[][] {
         {StorageType.ARCHIVE, StorageType.DISK}};
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
index 8a62e0e..1ab7788 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
@@ -29,6 +29,11 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
+import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.sps.Context;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -43,12 +48,13 @@
   private Configuration conf = null;
   private MiniDFSCluster cluster = null;
   private DistributedFileSystem dfs = null;
+  private StoragePolicySatisfier externalSps = null;
 
   @Before
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.INTERNAL.toString());
+        StoragePolicySatisfierMode.EXTERNAL.toString());
     // Reduced refresh cycle to update latest datanodes.
     conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
         1000);
@@ -58,6 +64,14 @@
         .storageTypes(newtypes).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
+    NameNodeConnector nnc = DFSTestUtil.getNameNodeConnector(conf,
+        HdfsServerConstants.MOVER_ID_PATH, 1, false);
+
+    externalSps = new StoragePolicySatisfier(conf);
+    Context externalCtxt = new ExternalSPSContext(externalSps, nnc);
+
+    externalSps.init(externalCtxt);
+    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
   }
 
   @After
@@ -70,6 +84,9 @@
       cluster.shutdown();
       cluster = null;
     }
+    if (externalSps != null) {
+      externalSps.stopGracefully();
+    }
   }
 
   @Test(timeout = 30000)
@@ -92,41 +109,4 @@
     DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
         dfs);
   }
-
-  @Test(timeout = 30000)
-  public void testIsSatisfierRunningCommand() throws Exception {
-    final String file = "/testIsSatisfierRunningCommand";
-    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
-    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-    DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning", 0, "yes");
-
-    cluster.getNameNode().reconfigureProperty(
-        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-        StoragePolicySatisfierMode.NONE.toString());
-    cluster.waitActive();
-
-    DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning", 0, "no");
-
-    // Test with unnecessary args
-    DFSTestUtil.toolRun(admin, "-isInternalSatisfierRunning status", 1,
-        "Can't understand arguments: ");
-  }
-
-  @Test(timeout = 90000)
-  public void testSatisfyStoragePolicyCommandWithWaitOption()
-      throws Exception {
-    final String file = "/testSatisfyStoragePolicyCommandWithWaitOption";
-    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
-
-    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
-
-    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file
-        + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
-
-    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -w -path " + file, 0,
-        "Waiting for satisfy the policy");
-
-    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
-        dfs);
-  }
 }